/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Joyent, Inc.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
 */

#include "ixgbe_sw.h"

static char ixgbe_ident[] = "Intel 10Gb Ethernet";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static int ixgbe_alloc_rx_data(ixgbe_t *);
static void ixgbe_free_rx_data(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_setup_vmdq(ixgbe_t *);
static void ixgbe_setup_vmdq_rss(ixgbe_t *);
static void ixgbe_setup_rss_table(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
static void ixgbe_get_conf(ixgbe_t *);
static void ixgbe_init_params(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static void ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_sfp_check(void *);
static void ixgbe_overtemp_check(void *);
static void ixgbe_phy_check(void *);
static void ixgbe_link_timer(void *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_msix(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static int ixgbe_addmac(void *, const uint8_t *);
static int ixgbe_remmac(void *, const uint8_t *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static int ixgbe_quiesce(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
static int ixgbe_intr_cb_register(ixgbe_t *);
static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

char *ixgbe_priv_props[] = {
    "_tx_copy_thresh",
    "_tx_recycle_thresh",
    "_tx_overload_thresh",
    "_tx_resched_thresh",
    "_rx_copy_thresh",
    "_rx_limit_per_intr",
    "_intr_throttling",
    "_adv_pause_cap",
    "_adv_asym_pause_cap",
    NULL
};

#define IXGBE_MAX_PRIV_PROPS \
    (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))

static struct cb_ops ixgbe_cb_ops = {
    nulldev,            /* cb_open */
    nulldev,            /* cb_close */
    nodev,              /* cb_strategy */
    nodev,              /* cb_print */
    nodev,              /* cb_dump */
    nodev,              /* cb_read */
    nodev,              /* cb_write */
    nodev,              /* cb_ioctl */
    nodev,              /* cb_devmap */
    nodev,              /* cb_mmap */
    nodev,              /* cb_segmap */
    nochpoll,           /* cb_chpoll */
    ddi_prop_op,        /* cb_prop_op */
    NULL,               /* cb_stream */
    D_MP | D_HOTPLUG,   /* cb_flag */
    CB_REV,             /* cb_rev */
    nodev,              /* cb_aread */
    nodev               /* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
    DEVO_REV,           /* devo_rev */
    0,                  /* devo_refcnt */
    NULL,               /* devo_getinfo */
    nulldev,            /* devo_identify */
    nulldev,            /* devo_probe */
    ixgbe_attach,       /* devo_attach */
    ixgbe_detach,       /* devo_detach */
    nodev,              /* devo_reset */
    &ixgbe_cb_ops,      /* devo_cb_ops */
    NULL,               /* devo_bus_ops */
    ddi_power,          /* devo_power */
    ixgbe_quiesce,      /* devo_quiesce */
};

static struct modldrv ixgbe_modldrv = {
    &mod_driverops,     /* Type of module. This one is a driver */
    ixgbe_ident,        /* Description string */
    &ixgbe_dev_ops      /* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
    MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
    DDI_DEVICE_ATTR_V1,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
    normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
    internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
    external, "External", IXGBE_LB_EXTERNAL
};

#define IXGBE_M_CALLBACK_FLAGS \
    (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t ixgbe_m_callbacks = {
    IXGBE_M_CALLBACK_FLAGS,
    ixgbe_m_stat,
    ixgbe_m_start,
    ixgbe_m_stop,
    ixgbe_m_promisc,
    ixgbe_m_multicst,
    NULL,
    NULL,
    NULL,
    ixgbe_m_ioctl,
    ixgbe_m_getcapab,
    NULL,
    NULL,
    ixgbe_m_setprop,
    ixgbe_m_getprop,
    ixgbe_m_propinfo
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t ixgbe_82598eb_cap = {
    64,     /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    64,     /* default number of rx queues */
    16,     /* maximum number of rx groups */
    1,      /* minimum number of rx groups */
    1,      /* default number of rx groups */
    32,     /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    8,      /* default number of tx queues */
    16366,  /* maximum MTU size */
    0xFFFF, /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */
    18,     /* maximum total msix vectors */
    16,     /* maximum number of ring vectors */
    2,      /* maximum number of other vectors */
    IXGBE_EICR_LSC,     /* "other" interrupt types handled */
    0,      /* "other" interrupt types enable mask */
    (IXGBE_FLAG_DCA_CAPABLE     /* capability flags */
    | IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
    128,    /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    128,    /* default number of rx queues */
    64,     /* maximum number of rx groups */
    1,      /* minimum number of rx groups */
    1,      /* default number of rx groups */
    128,    /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    8,      /* default number of tx queues */
    15500,  /* maximum MTU size */
    0xFF8,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */
    64,     /* maximum total msix vectors */
    16,     /* maximum number of ring vectors */
    2,      /* maximum number of other vectors */
    (IXGBE_EICR_LSC
    | IXGBE_EICR_GPI_SDP1
    | IXGBE_EICR_GPI_SDP2),     /* "other" interrupt types handled */

    (IXGBE_SDP1_GPIEN
    | IXGBE_SDP2_GPIEN),        /* "other" interrupt types enable mask */

    (IXGBE_FLAG_DCA_CAPABLE
    | IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE
    | IXGBE_FLAG_RSC_CAPABLE
    | IXGBE_FLAG_SFP_PLUG_CAPABLE)  /* capability flags */
};

static adapter_info_t ixgbe_X540_cap = {
    128,    /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    128,    /* default number of rx queues */
    64,     /* maximum number of rx groups */
    1,      /* minimum number of rx groups */
    1,      /* default number of rx groups */
    128,    /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    8,      /* default number of tx queues */
    15500,  /* maximum MTU size */
    0xFF8,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */
    64,     /* maximum total msix vectors */
    16,     /* maximum number of ring vectors */
    2,      /* maximum number of other vectors */
    (IXGBE_EICR_LSC
    | IXGBE_EICR_GPI_SDP1_X540
    | IXGBE_EICR_GPI_SDP2_X540),    /* "other" interrupt types handled */

    (IXGBE_SDP1_GPIEN_X540
    | IXGBE_SDP2_GPIEN_X540),       /* "other" interrupt types enable mask */

    (IXGBE_FLAG_DCA_CAPABLE
    | IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE
    | IXGBE_FLAG_RSC_CAPABLE)       /* capability flags */
};

static adapter_info_t ixgbe_X550_cap = {
    128,    /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    128,    /* default number of rx queues */
    64,     /* maximum number of rx groups */
    1,      /* minimum number of rx groups */
    1,      /* default number of rx groups */
    128,    /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    8,      /* default number of tx queues */
    15500,  /* maximum MTU size */
    0xFF8,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    0x200,  /* default interrupt throttle rate */
    64,     /* maximum total msix vectors */
    16,     /* maximum number of ring vectors */
    2,      /* maximum number of other vectors */
    IXGBE_EICR_LSC,     /* "other" interrupt types handled */
    0,      /* "other" interrupt types enable mask */
    (IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE
    | IXGBE_FLAG_RSC_CAPABLE)       /* capability flags */
};

/*
 * Module Initialization Functions.
 */
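
/*
 * A note on the linkage sequence below: _init() must hand the GLDv3 MAC
 * entry points to the framework via mac_init_ops() before mod_install(),
 * and must undo that registration if installation fails. _fini() is
 * symmetric: mod_remove() fails, leaving the module loaded, for as long
 * as any instance is still attached.
 */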

int
_init(void)
{
    int status;

    mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

    status = mod_install(&ixgbe_modlinkage);

    if (status != DDI_SUCCESS) {
        mac_fini_ops(&ixgbe_dev_ops);
    }

    return (status);
}

int
_fini(void)
{
    int status;

    status = mod_remove(&ixgbe_modlinkage);

    if (status == DDI_SUCCESS) {
        mac_fini_ops(&ixgbe_dev_ops);
    }

    return (status);
}

int
_info(struct modinfo *modinfop)
{
    int status;

    status = mod_info(&ixgbe_modlinkage, modinfop);

    return (status);
}
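
/*
 * Attach proceeds in stages; each stage that completes sets a bit in
 * ixgbe->attach_progress (ATTACH_PROGRESS_*). On any failure,
 * ixgbe_unconfigure() consults that bitmask and tears down only the
 * stages that actually completed, so a partial attach unwinds cleanly.
 */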

/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
    ixgbe_t *ixgbe;
    struct ixgbe_osdep *osdep;
    struct ixgbe_hw *hw;
    int instance;
    char taskqname[32];

    /*
     * Check the command and perform corresponding operations
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_RESUME:
        return (ixgbe_resume(devinfo));

    case DDI_ATTACH:
        break;
    }

    /* Get the device instance */
    instance = ddi_get_instance(devinfo);

    /* Allocate memory for the instance data structure */
    ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

    ixgbe->dip = devinfo;
    ixgbe->instance = instance;

    hw = &ixgbe->hw;
    osdep = &ixgbe->osdep;
    hw->back = osdep;
    osdep->ixgbe = ixgbe;

    /* Attach the instance pointer to the dev_info data structure */
    ddi_set_driver_private(devinfo, ixgbe);

    /*
     * Initialize for FMA support
     */
    ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
        0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
        DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
    ixgbe_fm_init(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

    /*
     * Map PCI config space registers
     */
    if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map PCI configurations");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

    /*
     * Identify the chipset family
     */
    if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to identify hardware");
        goto attach_fail;
    }

    /*
     * Map device registers
     */
    if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map device registers");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

    /*
     * Initialize driver parameters
     */
    ixgbe_init_properties(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

    /*
     * Register interrupt callback
     */
    if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to register interrupt callback");
        goto attach_fail;
    }

    /*
     * Allocate interrupts
     */
    if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to allocate interrupts");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

    /*
     * Allocate rx/tx rings based on the ring numbers.
     * The actual numbers of rx/tx rings are decided by the number of
     * allocated interrupt vectors, so we should allocate the rings after
     * interrupts are allocated.
     */
    if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

    /*
     * Map rings to interrupt vectors
     */
    if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
        goto attach_fail;
    }

    /*
     * Add interrupt handlers
     */
    if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to add interrupt handlers");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

    /*
     * Create a taskq for sfp-change
     */
    (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
    if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
        1, TASKQ_DEFAULTPRI, 0)) == NULL) {
        ixgbe_error(ixgbe, "sfp_taskq create failed");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;

    /*
     * Create a taskq for over-temp
     */
    (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
    if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
        1, TASKQ_DEFAULTPRI, 0)) == NULL) {
        ixgbe_error(ixgbe, "overtemp_taskq create failed");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;

    /*
     * Create a taskq for processing external PHY interrupts
     */
    (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
    if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
        1, TASKQ_DEFAULTPRI, 0)) == NULL) {
        ixgbe_error(ixgbe, "phy_taskq create failed");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;

    /*
     * Initialize driver parameters
     */
    if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize driver settings");
        goto attach_fail;
    }

    /*
     * Initialize mutexes for this device.
     * Do this before enabling the interrupt handlers and registering
     * the softint, to avoid a situation where an interrupt handler
     * could try to use an uninitialized mutex.
     */
    ixgbe_init_locks(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

    /*
     * Initialize chipset hardware
     */
    if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize adapter");
        goto attach_fail;
    }
    ixgbe->link_check_complete = B_FALSE;
    ixgbe->link_check_hrtime = gethrtime() +
        (IXGBE_LINK_UP_TIME * 100000000ULL);
    ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

    if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
        goto attach_fail;
    }

    /*
     * Initialize adapter capabilities
     */
    ixgbe_init_params(ixgbe);

    /*
     * Initialize statistics
     */
    if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize statistics");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

    /*
     * Register the driver to the MAC
     */
    if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to register MAC");
        goto attach_fail;
    }
    mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
    ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

    ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
        IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
    if (ixgbe->periodic_id == 0) {
        ixgbe_error(ixgbe, "Failed to add the link check timer");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;

    /*
     * Now that mutex locks are initialized, and the chip is also
     * initialized, enable interrupts.
     */
    if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

    ixgbe_log(ixgbe, "%s", ixgbe_ident);
    atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);

    return (DDI_SUCCESS);

attach_fail:
    ixgbe_unconfigure(devinfo, ixgbe);
    return (DDI_FAILURE);
}

/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
    ixgbe_t *ixgbe;

    /*
     * Check detach command
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_SUSPEND:
        return (ixgbe_suspend(devinfo));

    case DDI_DETACH:
        break;
    }

    /*
     * Get the pointer to the driver private data structure
     */
    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    /*
     * If the device is still running, it needs to be stopped first.
     * This check is necessary because under some specific circumstances,
     * the detach routine can be called without stopping the interface
     * first.
     */
    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
        mutex_enter(&ixgbe->gen_lock);
        ixgbe_stop(ixgbe, B_TRUE);
        mutex_exit(&ixgbe->gen_lock);
        /* Disable and stop the watchdog timer */
        ixgbe_disable_watchdog_timer(ixgbe);
    }
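
/*
 * Received mblks may still be loaned upstream at this point; their data
 * buffers are backed by this driver's DMA memory, so detach must not
 * proceed until the upper layer has returned them all. ixgbe_rx_drain()
 * polls the outstanding-buffer count (rcb_pending) for that reason.
 */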
    /*
     * Check if there are still rx buffers held by the upper layer.
     * If so, fail the detach.
     */
    if (!ixgbe_rx_drain(ixgbe))
        return (DDI_FAILURE);

    /*
     * Do the remaining unconfigure routines
     */
    ixgbe_unconfigure(devinfo, ixgbe);

    return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
ixgbe_quiesce(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;
    struct ixgbe_hw *hw;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);

    if (ixgbe == NULL)
        return (DDI_FAILURE);

    hw = &ixgbe->hw;

    /*
     * Disable the adapter interrupts
     */
    ixgbe_disable_adapter_interrupts(ixgbe);

    /*
     * Tell firmware driver is no longer in control
     */
    ixgbe_release_driver_control(hw);

    /*
     * Reset the chipset
     */
    (void) ixgbe_reset_hw(hw);

    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
    /*
     * Disable interrupt
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
        (void) ixgbe_disable_intrs(ixgbe);
    }

    /*
     * remove the link check timer
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
        if (ixgbe->periodic_id != NULL) {
            ddi_periodic_delete(ixgbe->periodic_id);
            ixgbe->periodic_id = NULL;
        }
    }

    /*
     * Unregister MAC
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
        (void) mac_unregister(ixgbe->mac_hdl);
    }

    /*
     * Free statistics
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
        kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
    }

    /*
     * Remove interrupt handlers
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
        ixgbe_rem_intr_handlers(ixgbe);
    }

    /*
     * Remove taskq for sfp-status-change
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
        ddi_taskq_destroy(ixgbe->sfp_taskq);
    }

    /*
     * Remove taskq for over-temp
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
        ddi_taskq_destroy(ixgbe->overtemp_taskq);
    }

    /*
     * Remove taskq for external PHYs
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
        ddi_taskq_destroy(ixgbe->phy_taskq);
    }

    /*
     * Remove interrupts
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
        ixgbe_rem_intrs(ixgbe);
    }

    /*
     * Unregister interrupt callback handler
     */
    if (ixgbe->cb_hdl != NULL) {
        (void) ddi_cb_unregister(ixgbe->cb_hdl);
    }

    /*
     * Remove driver properties
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
        (void) ddi_prop_remove_all(devinfo);
    }

    /*
     * Stop the chipset
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
        mutex_enter(&ixgbe->gen_lock);
        ixgbe_chip_stop(ixgbe);
        mutex_exit(&ixgbe->gen_lock);
    }

    /*
     * Free register handle
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
        if (ixgbe->osdep.reg_handle != NULL)
            ddi_regs_map_free(&ixgbe->osdep.reg_handle);
    }

    /*
     * Free PCI config handle
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
        if (ixgbe->osdep.cfg_handle != NULL)
            pci_config_teardown(&ixgbe->osdep.cfg_handle);
    }

    /*
     * Free locks
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
        ixgbe_destroy_locks(ixgbe);
    }

    /*
     * Free the rx/tx rings
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
        ixgbe_free_rings(ixgbe);
    }

    /*
     * Unregister FMA capabilities
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
        ixgbe_fm_fini(ixgbe);
    }

    /*
     * Free the driver data structure
     */
    kmem_free(ixgbe, sizeof (ixgbe_t));

    ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    mac_register_t *mac;
    int status;

    if ((mac = mac_alloc(MAC_VERSION)) == NULL)
        return (IXGBE_FAILURE);

    mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    mac->m_driver = ixgbe;
    mac->m_dip = ixgbe->dip;
    mac->m_src_addr = hw->mac.addr;
    mac->m_callbacks = &ixgbe_m_callbacks;
    mac->m_min_sdu = 0;
    mac->m_max_sdu = ixgbe->default_mtu;
    mac->m_margin = VLAN_TAGSZ;
    mac->m_priv_props = ixgbe_priv_props;
    mac->m_v12n = MAC_VIRT_LEVEL1;

    status = mac_register(mac, &ixgbe->mac_hdl);

    mac_free(mac);

    return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}
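
/*
 * Note: ixgbe_identify_hardware() below points ixgbe->capab at one of the
 * static adapter_info_t tables defined earlier and may then set additional
 * flags in it. Those tables are shared, so such modifications are visible
 * to every instance with the same MAC type.
 */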

/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    struct ixgbe_osdep *osdep = &ixgbe->osdep;

    /*
     * Get the device id
     */
    hw->vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
    hw->device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
    hw->revision_id =
        pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
    hw->subsystem_device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
    hw->subsystem_vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

    /*
     * Set the mac type of the adapter based on the device id
     */
    if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Install adapter capabilities
     */
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
        ixgbe->capab = &ixgbe_82598eb_cap;

        if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
            ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
            ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
            ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
        }
        break;

    case ixgbe_mac_82599EB:
        IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
        ixgbe->capab = &ixgbe_82599eb_cap;

        if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
            ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
            ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
            ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
        }
        break;

    case ixgbe_mac_X540:
        IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
        ixgbe->capab = &ixgbe_X540_cap;
        /*
         * For now, X540 is all set in its capab structure.
         * As other X540 variants show up, things can change here.
         */
        break;

    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
        IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
        ixgbe->capab = &ixgbe_X550_cap;

        if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
            ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;

        /*
         * Link detection on X552 SFP+ and X552/X557-AT
         */
        if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
            hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
            ixgbe->capab->other_intr |=
                IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
            ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
        }
        break;

    default:
        IXGBE_DEBUGLOG_1(ixgbe,
            "adapter not supported in ixgbe_identify_hardware(): %d\n",
            hw->mac.type);
        return (IXGBE_FAILURE);
    }

    return (IXGBE_SUCCESS);
}
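
/*
 * The register set index IXGBE_ADAPTER_REGSET below selects which entry of
 * the device's "reg" property to map (presumably the memory-mapped BAR;
 * entry 0 conventionally names PCI config space). The ixgbe_regs_acc_attr
 * attributes request little-endian, strictly ordered access with
 * DDI_FLAGERR_ACC, so faulted register accesses are reported through FMA.
 */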

/*
 * ixgbe_regs_map - Map the device registers.
 *
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
    dev_info_t *devinfo = ixgbe->dip;
    struct ixgbe_hw *hw = &ixgbe->hw;
    struct ixgbe_osdep *osdep = &ixgbe->osdep;
    off_t mem_size;

    /*
     * First get the size of device registers to be mapped.
     */
    if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
        != DDI_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Call ddi_regs_map_setup() to map registers
     */
    if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
        (caddr_t *)&hw->hw_addr, 0,
        mem_size, &ixgbe_regs_acc_attr,
        &osdep->reg_handle)) != DDI_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
    /*
     * Get conf file properties, including link settings,
     * jumbo frames, ring number, descriptor number, etc.
     */
    ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    dev_info_t *devinfo = ixgbe->dip;
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_rx_group_t *rx_group;
    ixgbe_tx_ring_t *tx_ring;
    uint32_t rx_size;
    uint32_t tx_size;
    uint32_t ring_per_group;
    int i;

    /*
     * Initialize chipset specific hardware function pointers
     */
    if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Get the system page size
     */
    ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

    /*
     * Set rx buffer size
     *
     * The IP header alignment room is counted in the calculation.
     * The rx buffer size is in units of 1K, as required by the
     * chipset hardware.
     */
    rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
    ixgbe->rx_buf_size = ((rx_size >> 10) +
        ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

    /*
     * Set tx buffer size
     */
    tx_size = ixgbe->max_frame_size;
    ixgbe->tx_buf_size = ((tx_size >> 10) +
        ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
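
/*
 * Both calculations above round a byte count up to the next 1 KB
 * boundary; e.g. for tx_size = 1600, (1600 >> 10) = 1, the remainder
 * term contributes 1, and (1 + 1) << 10 = 2048.
 */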

    /*
     * Initialize rx/tx rings/groups parameters
     */
    ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        rx_ring->index = i;
        rx_ring->ixgbe = ixgbe;
        rx_ring->group_index = i / ring_per_group;
        rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
    }

    for (i = 0; i < ixgbe->num_rx_groups; i++) {
        rx_group = &ixgbe->rx_groups[i];
        rx_group->index = i;
        rx_group->ixgbe = ixgbe;
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        tx_ring->index = i;
        tx_ring->ixgbe = ixgbe;
        if (ixgbe->tx_head_wb_enable)
            tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
        else
            tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

        tx_ring->ring_size = ixgbe->tx_ring_size;
        tx_ring->free_list_size = ixgbe->tx_ring_size +
            (ixgbe->tx_ring_size >> 1);
    }

    /*
     * Initialize values of interrupt throttling rate
     */
    for (i = 1; i < MAX_INTR_VECTOR; i++)
        ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

    /*
     * The initial link state should be "unknown"
     */
    ixgbe->link_state = LINK_STATE_UNKNOWN;

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        mutex_init(&rx_ring->rx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        mutex_init(&tx_ring->tx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->recycle_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->tcb_head_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->tcb_tail_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
    }

    mutex_init(&ixgbe->gen_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

    mutex_init(&ixgbe->watchdog_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}

/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        mutex_destroy(&rx_ring->rx_lock);
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        mutex_destroy(&tx_ring->tx_lock);
        mutex_destroy(&tx_ring->recycle_lock);
        mutex_destroy(&tx_ring->tcb_head_lock);
        mutex_destroy(&tx_ring->tcb_tail_lock);
    }

    mutex_destroy(&ixgbe->gen_lock);
    mutex_destroy(&ixgbe->watchdog_lock);
}

/*
 * We need to try and determine which LED index in hardware corresponds to the
 * link/activity LED. This is the one that'll be overwritten when we perform
 * GLDv3 LED activity.
 */
static void
ixgbe_led_init(ixgbe_t *ixgbe)
{
    uint32_t reg, i;
    struct ixgbe_hw *hw = &ixgbe->hw;

    reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
    for (i = 0; i < 4; i++) {
        if (((reg >> IXGBE_LED_MODE_SHIFT(i)) &
            IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) {
            ixgbe->ixgbe_led_index = i;
            return;
        }
    }

    /*
     * If we couldn't determine this, we use the default for various MACs
     * based on information Intel has inserted into other drivers over the
     * years. Note: when we gain support for the X553, which should add
     * the ixgbe_x550_em_a mac type, that type should be at index 0.
     */
    switch (hw->mac.type) {
    case ixgbe_mac_X550EM_x:
        ixgbe->ixgbe_led_index = 1;
        break;
    default:
        ixgbe->ixgbe_led_index = 2;
        break;
    }
}
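
/*
 * ixgbe_resume()/ixgbe_suspend() implement the DDI_RESUME and DDI_SUSPEND
 * commands dispatched from attach(9E) and detach(9E) above. Soft state is
 * preserved across the pair; the IXGBE_STARTED flag records whether the
 * interface was running, so resume restarts the hardware only in that case.
 */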
static int
ixgbe_resume(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;
    int i;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    mutex_enter(&ixgbe->gen_lock);

    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
            mutex_exit(&ixgbe->gen_lock);
            return (DDI_FAILURE);
        }

        /*
         * Enable and start the watchdog timer
         */
        ixgbe_enable_watchdog_timer(ixgbe);
    }

    atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        for (i = 0; i < ixgbe->num_tx_rings; i++) {
            mac_tx_ring_update(ixgbe->mac_hdl,
                ixgbe->tx_rings[i].ring_handle);
        }
    }

    mutex_exit(&ixgbe->gen_lock);

    return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    mutex_enter(&ixgbe->gen_lock);

    atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
    if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
        mutex_exit(&ixgbe->gen_lock);
        return (DDI_SUCCESS);
    }
    ixgbe_stop(ixgbe, B_FALSE);

    mutex_exit(&ixgbe->gen_lock);

    /*
     * Disable and stop the watchdog timer
     */
    ixgbe_disable_watchdog_timer(ixgbe);

    return (DDI_SUCCESS);
}
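
/*
 * ixgbe_init() brings the shared code and the chipset up far enough to
 * register with the MAC layer: common-code init, EEPROM/NVM validation,
 * flow-control defaults, link setup and ixgbe_chip_start(). It runs
 * entirely under gen_lock; on failure the PHY is reset and the service
 * impact is reported through FMA before IXGBE_FAILURE is returned.
 */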

/*
 * ixgbe_init - Initialize the device.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    u8 pbanum[IXGBE_PBANUM_LENGTH];
    int rv;

    mutex_enter(&ixgbe->gen_lock);

    /*
     * Configure/Initialize hardware
     */
    rv = ixgbe_init_hw(hw);
    if (rv != IXGBE_SUCCESS) {
        switch (rv) {

        /*
         * The first three errors do not prevent us from progressing
         * further, and are mainly advisory in nature. In the case of an
         * SFP module not being present or not deemed supported by the
         * common code, we advise the operator of this fact but carry on
         * instead of failing hard, as SFPs can be inserted or replaced
         * while the driver is running. In the case of an unknown error,
         * we fail hard, logging the reason and emitting an FMA event.
         */
        case IXGBE_ERR_EEPROM_VERSION:
            ixgbe_error(ixgbe,
                "This Intel 10Gb Ethernet device is pre-release and"
                " contains outdated firmware. Please contact your"
                " hardware vendor for a replacement.");
            break;
        case IXGBE_ERR_SFP_NOT_PRESENT:
            ixgbe_error(ixgbe,
                "No SFP+ module detected on this interface. Please "
                "install a supported SFP+ module for this "
                "interface to become operational.");
            break;
        case IXGBE_ERR_SFP_NOT_SUPPORTED:
            ixgbe_error(ixgbe,
                "Unsupported SFP+ module detected. Please replace "
                "it with a supported SFP+ module per Intel "
                "documentation, or bypass this check with "
                "allow_unsupported_sfp=1 in ixgbe.conf.");
            break;
        default:
            ixgbe_error(ixgbe,
                "Failed to initialize hardware. ixgbe_init_hw "
                "returned %d", rv);
            ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
            goto init_fail;
        }
    }

    /*
     * Need to init eeprom before validating the checksum.
     */
    if (ixgbe_init_eeprom_params(hw) < 0) {
        ixgbe_error(ixgbe,
            "Unable to initialize the eeprom interface.");
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto init_fail;
    }

    /*
     * NVM validation
     */
    if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
        /*
         * Some PCI-E parts fail the first check due to
         * the link being in sleep state. Call it again,
         * if it fails a second time it's a real issue.
         */
        if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
            ixgbe_error(ixgbe,
                "Invalid NVM checksum. Please contact "
                "the vendor to update the NVM.");
            ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
            goto init_fail;
        }
    }

    /*
     * Setup default flow control thresholds - enable/disable
     * & flow control type is controlled by ixgbe.conf
     */
    hw->fc.high_water[0] = DEFAULT_FCRTH;
    hw->fc.low_water[0] = DEFAULT_FCRTL;
    hw->fc.pause_time = DEFAULT_FCPAUSE;
    hw->fc.send_xon = B_TRUE;

    /*
     * Initialize flow control
     */
    (void) ixgbe_start_hw(hw);

    /*
     * Initialize link settings
     */
    (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

    /*
     * Initialize the chipset hardware
     */
    if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto init_fail;
    }

    /*
     * Read identifying information and place in devinfo.
     */
    pbanum[0] = '\0';
    (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
    if (*pbanum != '\0') {
        (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
            "printed-board-assembly", (char *)pbanum);
    }

    /*
     * Determine LED index.
     */
    ixgbe_led_init(ixgbe);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        goto init_fail;
    }

    mutex_exit(&ixgbe->gen_lock);
    return (IXGBE_SUCCESS);

init_fail:
    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    mutex_exit(&ixgbe->gen_lock);
    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
    return (IXGBE_FAILURE);
}
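
/*
 * ixgbe_chip_start() is called both from ixgbe_init() during attach and
 * from ixgbe_start() on every start/reset, always with gen_lock held
 * (asserted below).
 */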
/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Get the mac address
     * This function should handle SPARC case correctly.
     */
    if (!ixgbe_find_mac_address(ixgbe)) {
        ixgbe_error(ixgbe, "Failed to get the mac address");
        return (IXGBE_FAILURE);
    }

    /*
     * Validate the mac address
     */
    (void) ixgbe_init_rx_addrs(hw);
    if (!is_valid_mac_addr(hw->mac.addr)) {
        ixgbe_error(ixgbe, "Invalid mac address");
        return (IXGBE_FAILURE);
    }

    /*
     * Re-enable relaxed ordering for performance. It is disabled
     * by default in the hardware init.
     */
    if (ixgbe->relax_order_enable == B_TRUE)
        ixgbe_enable_relaxed_ordering(hw);

    /*
     * Setup adapter interrupt vectors
     */
    ixgbe_setup_adapter_vector(ixgbe);

    /*
     * Initialize unicast addresses.
     */
    ixgbe_init_unicst(ixgbe);

    /*
     * Setup and initialize the mctable structures.
     */
    ixgbe_setup_multicst(ixgbe);

    /*
     * Set interrupt throttling rate
     */
    for (i = 0; i < ixgbe->intr_cnt; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
    }

    /*
     * Disable Wake-on-LAN
     */
    IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);

    /*
     * Some adapters offer Energy Efficient Ethernet (EEE) support.
     * Due to issues with EEE in e1000g/igb, we disable this by default
     * as a precautionary measure.
     *
     * Currently, the only known adapter which supports EEE in the ixgbe
     * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the
     * first revision of it, as well as any X550 with MAC type 6 (non-EM)
     */
    (void) ixgbe_setup_eee(hw, B_FALSE);

    /*
     * Turn on any present SFP Tx laser
     */
    ixgbe_enable_tx_laser(hw);

    /*
     * Power on the PHY
     */
    (void) ixgbe_set_phy_power(hw, B_TRUE);

    /*
     * Save the state of the PHY
     */
    ixgbe_get_hw_state(ixgbe);

    /*
     * Make sure driver has control
     */
    ixgbe_get_driver_control(hw);

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int rv;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Stop interrupt generation and disable Tx unit
     */
    hw->adapter_stopped = B_FALSE;
    (void) ixgbe_stop_adapter(hw);

    /*
     * Reset the chipset
     */
    (void) ixgbe_reset_hw(hw);

    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    /*
     * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting
     * the PHY while doing so. Else, just power down the PHY.
     */
    if (hw->phy.ops.enter_lplu != NULL) {
        hw->phy.reset_disable = B_TRUE;
        rv = hw->phy.ops.enter_lplu(hw);
        if (rv != IXGBE_SUCCESS)
            ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
        hw->phy.reset_disable = B_FALSE;
    } else {
        (void) ixgbe_set_phy_power(hw, B_FALSE);
    }

    /*
     * Turn off any present SFP Tx laser
     * Expected for health and safety reasons
     */
    ixgbe_disable_tx_laser(hw);

    /*
     * Tell firmware driver is no longer in control
     */
    ixgbe_release_driver_control(hw);
}

/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
    int i;

    /*
     * Disable and stop the watchdog timer
     */
    ixgbe_disable_watchdog_timer(ixgbe);

    mutex_enter(&ixgbe->gen_lock);

    ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
    atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

    ixgbe_stop(ixgbe, B_FALSE);

    if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
        mutex_exit(&ixgbe->gen_lock);
        return (IXGBE_FAILURE);
    }

    /*
     * After resetting, need to recheck the link status.
     */
    ixgbe->link_check_complete = B_FALSE;
    ixgbe->link_check_hrtime = gethrtime() +
        (IXGBE_LINK_UP_TIME * 100000000ULL);

    atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

    if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
        for (i = 0; i < ixgbe->num_tx_rings; i++) {
            mac_tx_ring_update(ixgbe->mac_hdl,
                ixgbe->tx_rings[i].ring_handle);
        }
    }

    mutex_exit(&ixgbe->gen_lock);

    /*
     * Enable and start the watchdog timer
     */
    ixgbe_enable_watchdog_timer(ixgbe);

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
    ixgbe_tx_ring_t *tx_ring;
    tx_control_block_t *tcb;
    link_list_t pending_list;
    uint32_t desc_num;
    int i, j;

    LINK_LIST_INIT(&pending_list);

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];

        mutex_enter(&tx_ring->recycle_lock);

        /*
         * Clean the pending tx data - the pending packets in the
         * work_list that have no chances to be transmitted again.
         *
         * We must ensure the chipset is stopped or the link is down
         * before cleaning the transmit packets.
         */
        desc_num = 0;
        for (j = 0; j < tx_ring->ring_size; j++) {
            tcb = tx_ring->work_list[j];
            if (tcb != NULL) {
                desc_num += tcb->desc_num;

                tx_ring->work_list[j] = NULL;

                ixgbe_free_tcb(tcb);

                LIST_PUSH_TAIL(&pending_list, &tcb->link);
            }
        }

        if (desc_num > 0) {
            atomic_add_32(&tx_ring->tbd_free, desc_num);
            ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

            /*
             * Reset the head and tail pointers of the tbd ring;
             * reset the writeback head if it's enabled.
             */
            tx_ring->tbd_head = 0;
            tx_ring->tbd_tail = 0;
            if (ixgbe->tx_head_wb_enable)
                *tx_ring->tbd_head_wb = 0;

            IXGBE_WRITE_REG(&ixgbe->hw,
                IXGBE_TDH(tx_ring->index), 0);
            IXGBE_WRITE_REG(&ixgbe->hw,
                IXGBE_TDT(tx_ring->index), 0);
        }

        mutex_exit(&tx_ring->recycle_lock);

        /*
         * Add the tx control blocks in the pending list to
         * the free list.
         */
        ixgbe_put_free_list(tx_ring, &pending_list);
    }
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
    ixgbe_tx_ring_t *tx_ring;
    boolean_t done;
    int i, j;

    /*
     * Wait for a specific time to allow pending tx packets
     * to be transmitted.
     *
     * Check the counter tbd_free to see if transmission is done.
     * No lock protection is needed here.
     *
     * Return B_TRUE if all pending packets have been transmitted;
     * otherwise return B_FALSE.
     */
    for (i = 0; i < TX_DRAIN_TIME; i++) {

        done = B_TRUE;
        for (j = 0; j < ixgbe->num_tx_rings; j++) {
            tx_ring = &ixgbe->tx_rings[j];
            done = done &&
                (tx_ring->tbd_free == tx_ring->ring_size);
        }

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}
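
/*
 * Each poll iteration in ixgbe_rx_drain() below (and in ixgbe_tx_drain()
 * above) sleeps for one millisecond, so RX_DRAIN_TIME and TX_DRAIN_TIME
 * are effectively timeouts expressed in milliseconds.
 */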
/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
    boolean_t done = B_TRUE;
    int i;

    /*
     * Polling the rx free list to check if those rx buffers held by
     * the upper layer are released.
     *
     * Check the counter rcb_pending to see if all pending buffers are
     * released. No lock protection is needed here.
     *
     * Return B_TRUE if all pending buffers have been released;
     * otherwise return B_FALSE.
     */
    for (i = 0; i < RX_DRAIN_TIME; i++) {
        done = (ixgbe->rcb_pending == 0);

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}

/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    if (alloc_buffer) {
        if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
            ixgbe_error(ixgbe,
                "Failed to allocate software receive rings");
            return (IXGBE_FAILURE);
        }

        /* Allocate buffers for all the rx/tx rings */
        if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
            ixgbe_error(ixgbe, "Failed to allocate DMA resource");
            return (IXGBE_FAILURE);
        }

        ixgbe->tx_ring_init = B_TRUE;
    } else {
        ixgbe->tx_ring_init = B_FALSE;
    }

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    /*
     * Start the chipset hardware
     */
    if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto start_failure;
    }

    /*
     * Configure link now for X550
     *
     * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
     * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
     * the resting state of the link would be the maximum speed that
     * autonegotiation will allow (usually 10Gb, infrastructure allowing)
     * so we never bothered with explicitly setting the link to 10Gb as it
     * would already be at that state on driver attach. With X550, we must
     * trigger a re-negotiation of the link in order to switch from a LPLU
     * 1Gb link to 10Gb (cable and link partner permitting.)
     */
    if (hw->mac.type == ixgbe_mac_X550 ||
        hw->mac.type == ixgbe_mac_X550EM_x) {
        (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
        ixgbe_get_hw_state(ixgbe);
    }

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        goto start_failure;
    }

    /*
     * Setup the rx/tx rings
     */
    ixgbe_setup_rings(ixgbe);

    /*
     * ixgbe_start() will be called when resetting; however, if a reset
     * happens, we need to clear the ERROR, STALL and OVERTEMP flags
     * before enabling the interrupts.
     */
    atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
        | IXGBE_STALL | IXGBE_OVERTEMP));

    /*
     * Enable adapter interrupts
     * The interrupts must be enabled after the driver state is STARTED
     */
    ixgbe_enable_adapter_interrupts(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    return (IXGBE_SUCCESS);

start_failure:
    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

    return (IXGBE_FAILURE);
}

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Disable the adapter interrupts
     */
    ixgbe_disable_adapter_interrupts(ixgbe);

    /*
     * Drain the pending tx packets
     */
    (void) ixgbe_tx_drain(ixgbe);

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    /*
     * Stop the chipset hardware
     */
    ixgbe_chip_stop(ixgbe);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
    }

    /*
     * Clean the pending tx data/resources
     */
    ixgbe_tx_clean(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    if (ixgbe->link_state == LINK_STATE_UP) {
        ixgbe->link_state = LINK_STATE_UNKNOWN;
        mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
    }

    if (free_buffer) {
        /*
         * Release the DMA/memory resources of rx/tx rings
         */
        ixgbe_free_dma(ixgbe);
        ixgbe_free_rx_data(ixgbe);
    }
}

/*
 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
 */
/* ARGSUSED */
static int
ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
    void *arg1, void *arg2)
{
    ixgbe_t *ixgbe = (ixgbe_t *)arg1;

    switch (cbaction) {
    /* IRM callback */
    int count;
    case DDI_CB_INTR_ADD:
    case DDI_CB_INTR_REMOVE:
        count = (int)(uintptr_t)cbarg;
        ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
        DTRACE_PROBE2(ixgbe__irm__callback, int, count,
            int, ixgbe->intr_cnt);
        if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
            DDI_SUCCESS) {
            ixgbe_error(ixgbe,
                "IRM CB: Failed to adjust interrupts");
            goto cb_fail;
        }
        break;
    default:
        IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
            cbaction);
        return (DDI_ENOTSUP);
    }
    return (DDI_SUCCESS);
cb_fail:
    return (DDI_FAILURE);
}
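
/*
 * The Interrupt Resource Management (IRM) framework may ask the driver to
 * give up, or accept additional, MSI-X vectors at runtime through the
 * DDI_CB_INTR_ADD/DDI_CB_INTR_REMOVE callbacks dispatched above.
 * ixgbe_intr_adjust() quiesces the interface, frees or allocates vectors,
 * remaps the rings to the new vector set and restarts, so traffic is
 * briefly interrupted while the adjustment is in flight.
 */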
2031 */
2032 static int
2033 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
2034 {
2035 int i, rc, actual;
2036
2037 if (count == 0)
2038 return (DDI_SUCCESS);
2039
2040 if ((cbaction == DDI_CB_INTR_ADD &&
2041 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
2042 (cbaction == DDI_CB_INTR_REMOVE &&
2043 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
2044 return (DDI_FAILURE);
2045
2046 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
2047 return (DDI_FAILURE);
2048 }
2049
2050 for (i = 0; i < ixgbe->num_rx_rings; i++)
2051 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
2052 for (i = 0; i < ixgbe->num_tx_rings; i++)
2053 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
2054
2055 mutex_enter(&ixgbe->gen_lock);
2056 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
2057 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
2058 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
2059 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
2060
2061 ixgbe_stop(ixgbe, B_FALSE);
2062 /*
2063 * Disable interrupts
2064 */
2065 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
2066 rc = ixgbe_disable_intrs(ixgbe);
2067 ASSERT(rc == IXGBE_SUCCESS);
2068 }
2069 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
2070
2071 /*
2072 * Remove interrupt handlers
2073 */
2074 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
2075 ixgbe_rem_intr_handlers(ixgbe);
2076 }
2077 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
2078
2079 /*
2080 * Clear vect_map
2081 */
2082 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
2083 switch (cbaction) {
2084 case DDI_CB_INTR_ADD:
2085 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
2086 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
2087 DDI_INTR_ALLOC_NORMAL);
2088 if (rc != DDI_SUCCESS || actual != count) {
2089 ixgbe_log(ixgbe, "Adjust interrupts failed. "
2090 "return: %d, irm cb size: %d, actual: %d",
2091 rc, count, actual);
2092 goto intr_adjust_fail;
2093 }
2094 ixgbe->intr_cnt += count;
2095 break;
2096
2097 case DDI_CB_INTR_REMOVE:
2098 for (i = ixgbe->intr_cnt - count;
2099 i < ixgbe->intr_cnt; i++) {
2100 rc = ddi_intr_free(ixgbe->htable[i]);
2101 ixgbe->htable[i] = NULL;
2102 if (rc != DDI_SUCCESS) {
2103 ixgbe_log(ixgbe, "Adjust interrupts failed. "
2104 "return: %d, irm cb size: %d, index: %d",
2105 rc, count, i);
2106 goto intr_adjust_fail;
2107 }
2108 }
2109 ixgbe->intr_cnt -= count;
2110 break;
2111 }
2112
2113 /*
2114 * Get priority for first vector, assume remaining are all the same
2115 */
2116 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
2117 if (rc != DDI_SUCCESS) {
2118 ixgbe_log(ixgbe,
2119 "Get interrupt priority failed: %d", rc);
2120 goto intr_adjust_fail;
2121 }
2122 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
2123 if (rc != DDI_SUCCESS) {
2124 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
2125 goto intr_adjust_fail;
2126 }
2127 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
2128
2129 /*
2130 * Map rings to interrupt vectors
2131 */
2132 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
2133 ixgbe_error(ixgbe,
2134 "IRM CB: Failed to map interrupts to vectors");
2135 goto intr_adjust_fail;
2136 }
2137
2138 /*
2139 * Add interrupt handlers
2140 */
2141 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
2142 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
2143 goto intr_adjust_fail;
2144 }
2145 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
2146
2147 /*
2148 * Now that the new interrupt handlers are in place, enable
2149 * the interrupts.
2150 */
2151 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
2152 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
2153 goto intr_adjust_fail;
2154 }
2155 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
2156 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
2157 ixgbe_error(ixgbe, "IRM CB: Failed to start");
2158 goto intr_adjust_fail;
2159 }
2160 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
2161 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
2162 ixgbe->ixgbe_state |= IXGBE_STARTED;
2163 mutex_exit(&ixgbe->gen_lock);
2164
2165 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2166 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
2167 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
2168 }
2169 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2170 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
2171 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
2172 }
2173
2174 /* Wakeup all Tx rings */
2175 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2176 mac_tx_ring_update(ixgbe->mac_hdl,
2177 ixgbe->tx_rings[i].ring_handle);
2178 }
2179
2180 IXGBE_DEBUGLOG_3(ixgbe,
2181 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
2182 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
2183 return (DDI_SUCCESS);
2184
2185 intr_adjust_fail:
2186 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
2187 mutex_exit(&ixgbe->gen_lock);
2188 return (DDI_FAILURE);
2189 }
2190
2191 /*
2192 * ixgbe_intr_cb_register - Register interrupt callback function.
2193 */
2194 static int
2195 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
2196 {
2197 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
2198 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
2199 return (IXGBE_FAILURE);
2200 }
2201 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
2202 return (IXGBE_SUCCESS);
2203 }
2204
2205 /*
2206 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
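 * Allocation uses KM_NOSLEEP, so each step may fail; on failure any
 * structure already allocated is freed before returning IXGBE_FAILURE.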
2207 */ 2208 static int 2209 ixgbe_alloc_rings(ixgbe_t *ixgbe) 2210 { 2211 /* 2212 * Allocate memory space for rx rings 2213 */ 2214 ixgbe->rx_rings = kmem_zalloc( 2215 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, 2216 KM_NOSLEEP); 2217 2218 if (ixgbe->rx_rings == NULL) { 2219 return (IXGBE_FAILURE); 2220 } 2221 2222 /* 2223 * Allocate memory space for tx rings 2224 */ 2225 ixgbe->tx_rings = kmem_zalloc( 2226 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, 2227 KM_NOSLEEP); 2228 2229 if (ixgbe->tx_rings == NULL) { 2230 kmem_free(ixgbe->rx_rings, 2231 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2232 ixgbe->rx_rings = NULL; 2233 return (IXGBE_FAILURE); 2234 } 2235 2236 /* 2237 * Allocate memory space for rx ring groups 2238 */ 2239 ixgbe->rx_groups = kmem_zalloc( 2240 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, 2241 KM_NOSLEEP); 2242 2243 if (ixgbe->rx_groups == NULL) { 2244 kmem_free(ixgbe->rx_rings, 2245 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2246 kmem_free(ixgbe->tx_rings, 2247 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2248 ixgbe->rx_rings = NULL; 2249 ixgbe->tx_rings = NULL; 2250 return (IXGBE_FAILURE); 2251 } 2252 2253 return (IXGBE_SUCCESS); 2254 } 2255 2256 /* 2257 * ixgbe_free_rings - Free the memory space of rx/tx rings. 2258 */ 2259 static void 2260 ixgbe_free_rings(ixgbe_t *ixgbe) 2261 { 2262 if (ixgbe->rx_rings != NULL) { 2263 kmem_free(ixgbe->rx_rings, 2264 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2265 ixgbe->rx_rings = NULL; 2266 } 2267 2268 if (ixgbe->tx_rings != NULL) { 2269 kmem_free(ixgbe->tx_rings, 2270 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2271 ixgbe->tx_rings = NULL; 2272 } 2273 2274 if (ixgbe->rx_groups != NULL) { 2275 kmem_free(ixgbe->rx_groups, 2276 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); 2277 ixgbe->rx_groups = NULL; 2278 } 2279 } 2280 2281 static int 2282 ixgbe_alloc_rx_data(ixgbe_t *ixgbe) 2283 { 2284 ixgbe_rx_ring_t *rx_ring; 2285 int i; 2286 2287 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2288 rx_ring = &ixgbe->rx_rings[i]; 2289 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) 2290 goto alloc_rx_rings_failure; 2291 } 2292 return (IXGBE_SUCCESS); 2293 2294 alloc_rx_rings_failure: 2295 ixgbe_free_rx_data(ixgbe); 2296 return (IXGBE_FAILURE); 2297 } 2298 2299 static void 2300 ixgbe_free_rx_data(ixgbe_t *ixgbe) 2301 { 2302 ixgbe_rx_ring_t *rx_ring; 2303 ixgbe_rx_data_t *rx_data; 2304 int i; 2305 2306 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2307 rx_ring = &ixgbe->rx_rings[i]; 2308 2309 mutex_enter(&ixgbe->rx_pending_lock); 2310 rx_data = rx_ring->rx_data; 2311 2312 if (rx_data != NULL) { 2313 rx_data->flag |= IXGBE_RX_STOPPED; 2314 2315 if (rx_data->rcb_pending == 0) { 2316 ixgbe_free_rx_ring_data(rx_data); 2317 rx_ring->rx_data = NULL; 2318 } 2319 } 2320 2321 mutex_exit(&ixgbe->rx_pending_lock); 2322 } 2323 } 2324 2325 /* 2326 * ixgbe_setup_rings - Setup rx/tx rings. 2327 */ 2328 static void 2329 ixgbe_setup_rings(ixgbe_t *ixgbe) 2330 { 2331 /* 2332 * Setup the rx/tx rings, including the following: 2333 * 2334 * 1. Setup the descriptor ring and the control block buffers; 2335 * 2. Initialize necessary registers for receive/transmit; 2336 * 3. 
Initialize software pointers/parameters for receive/transmit; 2337 */ 2338 ixgbe_setup_rx(ixgbe); 2339 2340 ixgbe_setup_tx(ixgbe); 2341 } 2342 2343 static void 2344 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 2345 { 2346 ixgbe_t *ixgbe = rx_ring->ixgbe; 2347 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2348 struct ixgbe_hw *hw = &ixgbe->hw; 2349 rx_control_block_t *rcb; 2350 union ixgbe_adv_rx_desc *rbd; 2351 uint32_t size; 2352 uint32_t buf_low; 2353 uint32_t buf_high; 2354 uint32_t reg_val; 2355 int i; 2356 2357 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2358 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2359 2360 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2361 rcb = rx_data->work_list[i]; 2362 rbd = &rx_data->rbd_ring[i]; 2363 2364 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2365 rbd->read.hdr_addr = 0; 2366 } 2367 2368 /* 2369 * Initialize the length register 2370 */ 2371 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2372 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2373 2374 /* 2375 * Initialize the base address registers 2376 */ 2377 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2378 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2379 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2380 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2381 2382 /* 2383 * Setup head & tail pointers 2384 */ 2385 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2386 rx_data->ring_size - 1); 2387 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2388 2389 rx_data->rbd_next = 0; 2390 rx_data->lro_first = 0; 2391 2392 /* 2393 * Setup the Receive Descriptor Control Register (RXDCTL) 2394 * PTHRESH=32 descriptors (half the internal cache) 2395 * HTHRESH=0 descriptors (to minimize latency on fetch) 2396 * WTHRESH defaults to 1 (writeback each descriptor) 2397 */ 2398 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2399 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2400 2401 /* Not a valid value for 82599, X540 or X550 */ 2402 if (hw->mac.type == ixgbe_mac_82598EB) { 2403 reg_val |= 0x0020; /* pthresh */ 2404 } 2405 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2406 2407 if (hw->mac.type == ixgbe_mac_82599EB || 2408 hw->mac.type == ixgbe_mac_X540 || 2409 hw->mac.type == ixgbe_mac_X550 || 2410 hw->mac.type == ixgbe_mac_X550EM_x) { 2411 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2412 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2413 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2414 } 2415 2416 /* 2417 * Setup the Split and Replication Receive Control Register. 2418 * Set the rx buffer size and the advanced descriptor type. 
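 * DROP_EN is also set so that, when a ring runs out of receive
 * descriptors, the hardware drops packets for that ring rather than
 * blocking reception on the other rings.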
2419 */
2420 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2421 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2422 reg_val |= IXGBE_SRRCTL_DROP_EN;
2423 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2424 }
2425
2426 static void
2427 ixgbe_setup_rx(ixgbe_t *ixgbe)
2428 {
2429 ixgbe_rx_ring_t *rx_ring;
2430 struct ixgbe_hw *hw = &ixgbe->hw;
2431 uint32_t reg_val;
2432 uint32_t ring_mapping;
2433 uint32_t i, index;
2434 uint32_t psrtype_rss_bit;
2435
2436 /*
2437 * Ensure that Rx is disabled while setting up
2438 * the Rx unit and Rx descriptor ring(s)
2439 */
2440 ixgbe_disable_rx(hw);
2441
2442 /* PSRTYPE must be configured for 82599 */
2443 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2444 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2445 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2446 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2447 reg_val |= IXGBE_PSRTYPE_L2HDR;
2448 reg_val |= 0x80000000;
2449 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2450 } else {
2451 if (ixgbe->num_rx_groups > 32) {
2452 psrtype_rss_bit = 0x20000000;
2453 } else {
2454 psrtype_rss_bit = 0x40000000;
2455 }
2456 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2457 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2458 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2459 reg_val |= IXGBE_PSRTYPE_L2HDR;
2460 reg_val |= psrtype_rss_bit;
2461 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2462 }
2463 }
2464
2465 /*
2466 * Set filter control in FCTRL to determine which types of packets
2467 * are passed up to the driver.
2468 * - Pass broadcast packets.
2469 * - Do not pass flow control pause frames (82598-specific)
2470 */
2471 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2472 reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */
2473 if (hw->mac.type == ixgbe_mac_82598EB) {
2474 reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */
2475 }
2476 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2477
2478 /*
2479 * Hardware checksum settings
2480 */
2481 if (ixgbe->rx_hcksum_enable) {
2482 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2483 reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2484 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2485 }
2486
2487 /*
2488 * Setup VMDq and RSS for multiple receive queues
2489 */
2490 switch (ixgbe->classify_mode) {
2491 case IXGBE_CLASSIFY_RSS:
2492 /*
2493 * One group, only RSS is needed when more than
2494 * one ring is enabled.
2495 */
2496 ixgbe_setup_rss(ixgbe);
2497 break;
2498
2499 case IXGBE_CLASSIFY_VMDQ:
2500 /*
2501 * Multiple groups, each group has one ring,
2502 * only VMDq is needed.
2503 */
2504 ixgbe_setup_vmdq(ixgbe);
2505 break;
2506
2507 case IXGBE_CLASSIFY_VMDQ_RSS:
2508 /*
2509 * Multiple groups and multiple rings, both
2510 * VMDq and RSS are needed.
2511 */
2512 ixgbe_setup_vmdq_rss(ixgbe);
2513 break;
2514
2515 default:
2516 break;
2517 }
2518
2519 /*
2520 * Enable the receive unit. This must be done after filter
2521 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2522 * 82598 is the only adapter which defines this RXCTRL option.
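 * The RXEN write below goes through ixgbe_enable_rx_dma() so that any
 * MAC-specific sequencing needed to enable receive DMA is handled by
 * the common code rather than by a raw register write.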
2523 */
2524 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2525 if (hw->mac.type == ixgbe_mac_82598EB)
2526 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2527 reg_val |= IXGBE_RXCTRL_RXEN;
2528 (void) ixgbe_enable_rx_dma(hw, reg_val);
2529
2530 /*
2531 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2532 */
2533 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2534 rx_ring = &ixgbe->rx_rings[i];
2535 ixgbe_setup_rx_ring(rx_ring);
2536 }
2537
2538 /*
2539 * Setup the per-ring statistics mapping.
2540 */
2541 ring_mapping = 0;
2542 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2543 index = ixgbe->rx_rings[i].hw_index;
2544 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2545 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2546 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2547 }
2548
2549 /*
2550 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2551 * by four bytes if the packet has a VLAN field, so the value set
2552 * here includes the MTU, ethernet header and frame check sequence.
2553 * The register is named MAXFRS on 82599.
2554 */
2555 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
2556 reg_val &= ~IXGBE_MHADD_MFS_MASK;
2557 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
2558 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2559 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2560
2561 /*
2562 * Setup Jumbo Frame enable bit
2563 */
2564 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2565 if (ixgbe->default_mtu > ETHERMTU)
2566 reg_val |= IXGBE_HLREG0_JUMBOEN;
2567 else
2568 reg_val &= ~IXGBE_HLREG0_JUMBOEN;
2569 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2570
2571 /*
2572 * Setup RSC for multiple receive queues.
2573 */
2574 if (ixgbe->lro_enable) {
2575 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2576 /*
2577 * Make sure rx_buf_size * MAXDESC is not greater
2578 * than 65535.
2579 * Intel recommends 4 for MAXDESC field value.
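 * For example, with 16k receive buffers MAXDESC_4 would allow
 * 4 * 16384 = 65536 bytes, which exceeds 65535, so MAXDESC_1 is used
 * for that buffer size below.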
2580 */ 2581 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i)); 2582 reg_val |= IXGBE_RSCCTL_RSCEN; 2583 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k) 2584 reg_val |= IXGBE_RSCCTL_MAXDESC_1; 2585 else 2586 reg_val |= IXGBE_RSCCTL_MAXDESC_4; 2587 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val); 2588 } 2589 2590 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU); 2591 reg_val |= IXGBE_RSCDBU_RSCACKDIS; 2592 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val); 2593 2594 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2595 reg_val |= IXGBE_RDRXCTL_RSCACKC; 2596 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX; 2597 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2598 2599 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2600 } 2601 } 2602 2603 static void 2604 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring) 2605 { 2606 ixgbe_t *ixgbe = tx_ring->ixgbe; 2607 struct ixgbe_hw *hw = &ixgbe->hw; 2608 uint32_t size; 2609 uint32_t buf_low; 2610 uint32_t buf_high; 2611 uint32_t reg_val; 2612 2613 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2614 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2615 2616 /* 2617 * Initialize the length register 2618 */ 2619 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc); 2620 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size); 2621 2622 /* 2623 * Initialize the base address registers 2624 */ 2625 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2626 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2627 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low); 2628 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high); 2629 2630 /* 2631 * Setup head & tail pointers 2632 */ 2633 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0); 2634 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0); 2635 2636 /* 2637 * Setup head write-back 2638 */ 2639 if (ixgbe->tx_head_wb_enable) { 2640 /* 2641 * The memory of the head write-back is allocated using 2642 * the extra tbd beyond the tail of the tbd ring. 2643 */ 2644 tx_ring->tbd_head_wb = (uint32_t *) 2645 ((uintptr_t)tx_ring->tbd_area.address + size); 2646 *tx_ring->tbd_head_wb = 0; 2647 2648 buf_low = (uint32_t) 2649 (tx_ring->tbd_area.dma_address + size); 2650 buf_high = (uint32_t) 2651 ((tx_ring->tbd_area.dma_address + size) >> 32); 2652 2653 /* Set the head write-back enable bit */ 2654 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2655 2656 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2657 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2658 2659 /* 2660 * Turn off relaxed ordering for head write back or it will 2661 * cause problems with the tx recycling 2662 */ 2663 2664 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ? 
2665 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2666 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2667 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2668 if (hw->mac.type == ixgbe_mac_82598EB) {
2669 IXGBE_WRITE_REG(hw,
2670 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2671 } else {
2672 IXGBE_WRITE_REG(hw,
2673 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2674 }
2675 } else {
2676 tx_ring->tbd_head_wb = NULL;
2677 }
2678
2679 tx_ring->tbd_head = 0;
2680 tx_ring->tbd_tail = 0;
2681 tx_ring->tbd_free = tx_ring->ring_size;
2682
2683 if (ixgbe->tx_ring_init == B_TRUE) {
2684 tx_ring->tcb_head = 0;
2685 tx_ring->tcb_tail = 0;
2686 tx_ring->tcb_free = tx_ring->free_list_size;
2687 }
2688
2689 /*
2690 * Initialize the s/w context structure
2691 */
2692 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2693 }
2694
2695 static void
2696 ixgbe_setup_tx(ixgbe_t *ixgbe)
2697 {
2698 struct ixgbe_hw *hw = &ixgbe->hw;
2699 ixgbe_tx_ring_t *tx_ring;
2700 uint32_t reg_val;
2701 uint32_t ring_mapping;
2702 int i;
2703
2704 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2705 tx_ring = &ixgbe->tx_rings[i];
2706 ixgbe_setup_tx_ring(tx_ring);
2707 }
2708
2709 /*
2710 * Setup the per-ring statistics mapping.
2711 */
2712 ring_mapping = 0;
2713 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2714 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2715 if ((i & 0x3) == 0x3) {
2716 switch (hw->mac.type) {
2717 case ixgbe_mac_82598EB:
2718 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2719 ring_mapping);
2720 break;
2721
2722 case ixgbe_mac_82599EB:
2723 case ixgbe_mac_X540:
2724 case ixgbe_mac_X550:
2725 case ixgbe_mac_X550EM_x:
2726 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2727 ring_mapping);
2728 break;
2729
2730 default:
2731 break;
2732 }
2733
2734 ring_mapping = 0;
2735 }
2736 }
2737 if (i & 0x3) {
2738 switch (hw->mac.type) {
2739 case ixgbe_mac_82598EB:
2740 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2741 break;
2742
2743 case ixgbe_mac_82599EB:
2744 case ixgbe_mac_X540:
2745 case ixgbe_mac_X550:
2746 case ixgbe_mac_X550EM_x:
2747 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2748 break;
2749
2750 default:
2751 break;
2752 }
2753 }
2754
2755 /*
2756 * Enable CRC appending and TX padding (for short tx frames)
2757 */
2758 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2759 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2760 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2761
2762 /*
2763 * Enable DMA for 82599, X540 and X550 parts
2764 */
2765 if (hw->mac.type == ixgbe_mac_82599EB ||
2766 hw->mac.type == ixgbe_mac_X540 ||
2767 hw->mac.type == ixgbe_mac_X550 ||
2768 hw->mac.type == ixgbe_mac_X550EM_x) {
2769 /* DMATXCTL.TE must be set after all Tx config is complete */
2770 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2771 reg_val |= IXGBE_DMATXCTL_TE;
2772 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2773
2774 /* Disable arbiter to set MTQC */
2775 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2776 reg_val |= IXGBE_RTTDCS_ARBDIS;
2777 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2778 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2779 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2780 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2781 }
2782
2783 /*
2784 * Enable tx queues.
2785 * For 82599, this must be done after DMATXCTL.TE is set
2786 */
2787 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2788 tx_ring = &ixgbe->tx_rings[i];
2789 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2790 reg_val |= IXGBE_TXDCTL_ENABLE;
2791 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2792 }
2793 }
2794
2795 /*
2796 * ixgbe_setup_rss - Setup receive-side scaling feature.
2797 */
2798 static void
2799 ixgbe_setup_rss(ixgbe_t *ixgbe)
2800 {
2801 struct ixgbe_hw *hw = &ixgbe->hw;
2802 uint32_t mrqc;
2803
2804 /*
2805 * Initialize RETA/ERETA table
2806 */
2807 ixgbe_setup_rss_table(ixgbe);
2808
2809 /*
2810 * Enable RSS & perform hash on these packet types
2811 */
2812 mrqc = IXGBE_MRQC_RSSEN |
2813 IXGBE_MRQC_RSS_FIELD_IPV4 |
2814 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2815 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2816 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2817 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2818 IXGBE_MRQC_RSS_FIELD_IPV6 |
2819 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2820 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2821 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2822 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2823 }
2824
2825 /*
2826 * ixgbe_setup_vmdq - Setup MAC classification feature
2827 */
2828 static void
2829 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2830 {
2831 struct ixgbe_hw *hw = &ixgbe->hw;
2832 uint32_t vmdctl, i, vtctl;
2833
2834 /*
2835 * Setup the VMDq Control register, enable VMDq based on
2836 * packet destination MAC address:
2837 */
2838 switch (hw->mac.type) {
2839 case ixgbe_mac_82598EB:
2840 /*
2841 * VMDq Enable = 1;
2842 * VMDq Filter = 0; MAC filtering
2843 * Default VMDq output index = 0;
2844 */
2845 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2846 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2847 break;
2848
2849 case ixgbe_mac_82599EB:
2850 case ixgbe_mac_X540:
2851 case ixgbe_mac_X550:
2852 case ixgbe_mac_X550EM_x:
2853 /*
2854 * Enable VMDq-only.
2855 */
2856 vmdctl = IXGBE_MRQC_VMDQEN;
2857 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2858
2859 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2860 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2861 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2862 }
2863
2864 /*
2865 * Enable Virtualization and Replication.
2866 */
2867 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2868 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2869
2870 /*
2871 * Enable receiving packets to all VFs
2872 */
2873 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2874 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2875 break;
2876
2877 default:
2878 break;
2879 }
2880 }
2881
2882 /*
2883 * ixgbe_setup_vmdq_rss - Setup both the vmdq and rss features.
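 * The RETA/ERETA redirection table is initialized first; MRQC then
 * selects the combined VMDq+RSS mode, sized by the number of rx groups.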
2884 */
2885 static void
2886 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2887 {
2888 struct ixgbe_hw *hw = &ixgbe->hw;
2889 uint32_t i, mrqc;
2890 uint32_t vtctl, vmdctl;
2891
2892 /*
2893 * Initialize RETA/ERETA table
2894 */
2895 ixgbe_setup_rss_table(ixgbe);
2896
2897 /*
2898 * Enable and setup RSS and VMDq
2899 */
2900 switch (hw->mac.type) {
2901 case ixgbe_mac_82598EB:
2902 /*
2903 * Enable RSS & Setup RSS Hash functions
2904 */
2905 mrqc = IXGBE_MRQC_RSSEN |
2906 IXGBE_MRQC_RSS_FIELD_IPV4 |
2907 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2908 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2909 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2910 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2911 IXGBE_MRQC_RSS_FIELD_IPV6 |
2912 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2913 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2914 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2915 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2916
2917 /*
2918 * Enable and Setup VMDq
2919 * VMDq Filter = 0; MAC filtering
2920 * Default VMDq output index = 0;
2921 */
2922 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2923 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2924 break;
2925
2926 case ixgbe_mac_82599EB:
2927 case ixgbe_mac_X540:
2928 case ixgbe_mac_X550:
2929 case ixgbe_mac_X550EM_x:
2930 /*
2931 * Enable RSS & Setup RSS Hash functions
2932 */
2933 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2934 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2935 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2936 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2937 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2938 IXGBE_MRQC_RSS_FIELD_IPV6 |
2939 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2940 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2941 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2942
2943 /*
2944 * Enable VMDq+RSS.
2945 */
2946 if (ixgbe->num_rx_groups > 32) {
2947 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2948 } else {
2949 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2950 }
2951
2952 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2953
2954 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2955 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2956 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2957 }
2958 break;
2959
2960 default:
2961 break;
2962
2963 }
2964
2965 if (hw->mac.type == ixgbe_mac_82599EB ||
2966 hw->mac.type == ixgbe_mac_X540 ||
2967 hw->mac.type == ixgbe_mac_X550 ||
2968 hw->mac.type == ixgbe_mac_X550EM_x) {
2969 /*
2970 * Enable Virtualization and Replication.
2971 */
2972 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2973 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2974
2975 /*
2976 * Enable receiving packets to all VFs
2977 */
2978 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2979 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2980 }
2981 }
2982
2983 /*
2984 * ixgbe_setup_rss_table - Setup RSS table
2985 */
2986 static void
2987 ixgbe_setup_rss_table(ixgbe_t *ixgbe)
2988 {
2989 struct ixgbe_hw *hw = &ixgbe->hw;
2990 uint32_t i, j;
2991 uint32_t random;
2992 uint32_t reta;
2993 uint32_t ring_per_group;
2994 uint32_t ring;
2995 uint32_t table_size;
2996 uint32_t index_mult;
2997 uint32_t rxcsum;
2998
2999 /*
3000 * Set multiplier for RETA setup and table size based on MAC type.
3001 * RETA table sizes vary by model:
3002 *
3003 * 82598, 82599, X540: 128 table entries.
3004 * X550: 512 table entries.
3005 */
3006 index_mult = 0x1;
3007 table_size = 128;
3008 switch (ixgbe->hw.mac.type) {
3009 case ixgbe_mac_82598EB:
3010 index_mult = 0x11;
3011 break;
3012 case ixgbe_mac_X550:
3013 case ixgbe_mac_X550EM_x:
3014 table_size = 512;
3015 break;
3016 default:
3017 break;
3018 }
3019
3020 /*
3021 * Fill out the RSS redirection table. The configuration of the
3022 * indices is hardware-dependent.
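 * Four consecutive entries are packed into each 32-bit RETA/ERETA
 * register, one byte per entry, and written out on every fourth
 * iteration of the loop below.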
3023 *
3024 * 82598: 8 bits wide containing two 4 bit RSS indices
3025 * 82599, X540: 8 bits wide containing one 4 bit RSS index
3026 * X550: 8 bits wide containing one 6 bit RSS index
3027 */
3028 reta = 0;
3029 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3030
3031 for (i = 0, j = 0; i < table_size; i++, j++) {
3032 if (j == ring_per_group)
3033 j = 0;
3034 /*
3035 * The low 8 bits are for hash value (n+0);
3036 * The next 8 bits are for hash value (n+1), etc.
3037 */
3038 ring = (j * index_mult);
3039 reta = reta >> 8;
3040 reta = reta | (((uint32_t)ring) << 24);
3041
3042 if ((i & 3) == 3) {
3043 /*
3044 * The first 128 table entries are programmed into the
3045 * RETA register, with any beyond that (e.g. on X550)
3046 * going into ERETA.
3047 */
3048 if (i < 128)
3049 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3050 else
3051 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3052 reta);
3053 reta = 0;
3054 }
3055 }
3056
3057 /*
3058 * Fill out the RSS hash function seeds with random values
3059 */
3060 for (i = 0; i < 10; i++) {
3061 (void) random_get_pseudo_bytes((uint8_t *)&random,
3062 sizeof (uint32_t));
3063 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
3064 }
3065
3066 /*
3067 * Disable Packet Checksum to enable RSS for multiple receive queues.
3068 * It is an adapter hardware limitation that Packet Checksum is
3069 * mutually exclusive with RSS.
3070 */
3071 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3072 rxcsum |= IXGBE_RXCSUM_PCSD;
3073 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
3074 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3075 }
3076
3077 /*
3078 * ixgbe_init_unicst - Initialize the unicast addresses.
3079 */
3080 static void
3081 ixgbe_init_unicst(ixgbe_t *ixgbe)
3082 {
3083 struct ixgbe_hw *hw = &ixgbe->hw;
3084 uint8_t *mac_addr;
3085 int slot;
3086 /*
3087 * Here we should consider two situations:
3088 *
3089 * 1. Chipset is initialized for the first time:
3090 * Clear all the multiple unicast addresses.
3091 *
3092 * 2. Chipset is reset:
3093 * Recover the multiple unicast addresses from the
3094 * software data structure to the RAR registers.
3095 */
3096 if (!ixgbe->unicst_init) {
3097 /*
3098 * Initialize the multiple unicast addresses
3099 */
3100 ixgbe->unicst_total = hw->mac.num_rar_entries;
3101 ixgbe->unicst_avail = ixgbe->unicst_total;
3102 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3103 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3104 bzero(mac_addr, ETHERADDRL);
3105 (void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0);
3106 ixgbe->unicst_addr[slot].mac.set = 0;
3107 }
3108 ixgbe->unicst_init = B_TRUE;
3109 } else {
3110 /* Re-configure the RAR registers */
3111 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3112 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3113 if (ixgbe->unicst_addr[slot].mac.set == 1) {
3114 (void) ixgbe_set_rar(hw, slot, mac_addr,
3115 ixgbe->unicst_addr[slot].mac.group_index,
3116 IXGBE_RAH_AV);
3117 } else {
3118 bzero(mac_addr, ETHERADDRL);
3119 (void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0);
3120 }
3121 }
3122 }
3123 }
3124
3125 /*
3126 * ixgbe_unicst_find - Find the slot for the specified unicast address
3127 */
3128 int
3129 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
3130 {
3131 int slot;
3132
3133 ASSERT(mutex_owned(&ixgbe->gen_lock));
3134
3135 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3136 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
3137 mac_addr, ETHERADDRL) == 0)
3138 return (slot);
3139 }
3140
3141 return (-1);
3142 }
3143
3144 /*
3145 * ixgbe_multicst_add - Add a multicast address.
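 * Returns EINVAL if the address is not a multicast address, ENOENT if
 * the multicast table is already full, and EIO on a register access
 * fault.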
3146 */
3147 int
3148 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3149 {
3150 ASSERT(mutex_owned(&ixgbe->gen_lock));
3151
3152 if ((multiaddr[0] & 01) == 0) {
3153 return (EINVAL);
3154 }
3155
3156 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
3157 return (ENOENT);
3158 }
3159
3160 bcopy(multiaddr,
3161 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
3162 ixgbe->mcast_count++;
3163
3164 /*
3165 * Update the multicast table in the hardware
3166 */
3167 ixgbe_setup_multicst(ixgbe);
3168
3169 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3170 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3171 return (EIO);
3172 }
3173
3174 return (0);
3175 }
3176
3177 /*
3178 * ixgbe_multicst_remove - Remove a multicast address.
3179 */
3180 int
3181 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3182 {
3183 int i;
3184
3185 ASSERT(mutex_owned(&ixgbe->gen_lock));
3186
3187 for (i = 0; i < ixgbe->mcast_count; i++) {
3188 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
3189 ETHERADDRL) == 0) {
3190 for (i++; i < ixgbe->mcast_count; i++) {
3191 ixgbe->mcast_table[i - 1] =
3192 ixgbe->mcast_table[i];
3193 }
3194 ixgbe->mcast_count--;
3195 break;
3196 }
3197 }
3198
3199 /*
3200 * Update the multicast table in the hardware
3201 */
3202 ixgbe_setup_multicst(ixgbe);
3203
3204 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3205 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3206 return (EIO);
3207 }
3208
3209 return (0);
3210 }
3211
3212 /*
3213 * ixgbe_setup_multicst - Setup multicast data structures.
3214 *
3215 * This routine initializes all of the multicast related structures
3216 * and saves them to the hardware registers.
3217 */
3218 static void
3219 ixgbe_setup_multicst(ixgbe_t *ixgbe)
3220 {
3221 uint8_t *mc_addr_list;
3222 uint32_t mc_addr_count;
3223 struct ixgbe_hw *hw = &ixgbe->hw;
3224
3225 ASSERT(mutex_owned(&ixgbe->gen_lock));
3226
3227 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
3228
3229 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
3230 mc_addr_count = ixgbe->mcast_count;
3231
3232 /*
3233 * Update the multicast addresses to the MTA registers
3234 */
3235 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
3236 ixgbe_mc_table_itr, TRUE);
3237 }
3238
3239 /*
3240 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
3241 *
3242 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
3243 * Different chipsets may allow different combinations of vmdq and rss.
3244 */
3245 static void
3246 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
3247 {
3248 struct ixgbe_hw *hw = &ixgbe->hw;
3249 uint32_t ring_per_group;
3250
3251 switch (hw->mac.type) {
3252 case ixgbe_mac_82598EB:
3253 /*
3254 * 82598 supports the following combinations:
3255 * vmdq no. x rss no.
3256 * [5..16] x 1
3257 * [1..4] x [1..16]
3258 * However, 8 rss queues per pool (vmdq) are sufficient for
3259 * most cases.
3260 */
3261 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3262 if (ixgbe->num_rx_groups > 4) {
3263 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
3264 } else {
3265 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3266 min(8, ring_per_group);
3267 }
3268
3269 break;
3270
3271 case ixgbe_mac_82599EB:
3272 case ixgbe_mac_X540:
3273 case ixgbe_mac_X550:
3274 case ixgbe_mac_X550EM_x:
3275 /*
3276 * 82599 supports the following combinations:
3277 * vmdq no. x rss no.
3278 * [33..64] x [1..2]
3279 * [2..32] x [1..4]
3280 * 1 x [1..16]
3281 * However, 8 rss queues per pool (vmdq) are sufficient for
3282 * most cases.
3283 *
3284 * For now, treat X540 and X550 like the 82599.
3285 */
3286 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3287 if (ixgbe->num_rx_groups == 1) {
3288 ixgbe->num_rx_rings = min(8, ring_per_group);
3289 } else if (ixgbe->num_rx_groups <= 32) {
3290 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3291 min(4, ring_per_group);
3292 } else if (ixgbe->num_rx_groups <= 64) {
3293 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3294 min(2, ring_per_group);
3295 }
3296 break;
3297
3298 default:
3299 break;
3300 }
3301
3302 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3303
3304 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3305 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3306 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
3307 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
3308 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
3309 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
3310 } else {
3311 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
3312 }
3313
3314 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
3315 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
3316 }
3317
3318 /*
3319 * ixgbe_get_conf - Get driver configurations set in driver.conf.
3320 *
3321 * This routine gets user-configured values out of the configuration
3322 * file ixgbe.conf.
3323 *
3324 * For each configurable value, there is a minimum, a maximum, and a
3325 * default.
3326 * If the user does not configure a value, use the default.
3327 * If the user configures below the minimum, use the minimum.
3328 * If the user configures above the maximum, use the maximum.
3329 */
3330 static void
3331 ixgbe_get_conf(ixgbe_t *ixgbe)
3332 {
3333 struct ixgbe_hw *hw = &ixgbe->hw;
3334 uint32_t flow_control;
3335
3336 /*
3337 * ixgbe driver supports the following user configurations:
3338 *
3339 * Jumbo frame configuration:
3340 * default_mtu
3341 *
3342 * Ethernet flow control configuration:
3343 * flow_control
3344 *
3345 * Multiple rings configurations:
3346 * tx_queue_number
3347 * tx_ring_size
3348 * rx_queue_number
3349 * rx_ring_size
3350 *
3351 * Call ixgbe_get_prop() to get the value for a specific
3352 * configuration parameter.
3353 */
3354
3355 /*
3356 * Jumbo frame configuration - max_frame_size controls host buffer
3357 * allocation, so it includes the MTU, ethernet header, vlan tag and
3358 * frame check sequence.
3359 */
3360 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3361 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3362
3363 ixgbe->max_frame_size = ixgbe->default_mtu +
3364 sizeof (struct ether_vlan_header) + ETHERFCSL;
3365
3366 /*
3367 * Ethernet flow control configuration
3368 */
3369 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3370 ixgbe_fc_none, 3, ixgbe_fc_none);
3371 if (flow_control == 3)
3372 flow_control = ixgbe_fc_default;
3373
3374 /*
3375 * fc.requested_mode is what the user requests. After autoneg,
3376 * fc.current_mode will be the flow_control mode that was negotiated.
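 * In driver.conf the flow_control property takes a value of 0-3,
 * where 3 selects the adapter default (e.g. "flow_control = 3;").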
3377 */
3378 hw->fc.requested_mode = flow_control;
3379
3380 /*
3381 * Multiple rings configurations
3382 */
3383 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3384 ixgbe->capab->min_tx_que_num,
3385 ixgbe->capab->max_tx_que_num,
3386 ixgbe->capab->def_tx_que_num);
3387 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3388 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3389
3390 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3391 ixgbe->capab->min_rx_que_num,
3392 ixgbe->capab->max_rx_que_num,
3393 ixgbe->capab->def_rx_que_num);
3394 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3395 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3396
3397 /*
3398 * Multiple groups configuration
3399 */
3400 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3401 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3402 ixgbe->capab->def_rx_grp_num);
3403
3404 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3405 0, 1, DEFAULT_MR_ENABLE);
3406
3407 if (ixgbe->mr_enable == B_FALSE) {
3408 ixgbe->num_tx_rings = 1;
3409 ixgbe->num_rx_rings = 1;
3410 ixgbe->num_rx_groups = 1;
3411 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3412 } else {
3413 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3414 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3415 /*
3416 * The combination of num_rx_rings and num_rx_groups
3417 * may not be supported by h/w. We need to adjust
3418 * them to appropriate values.
3419 */
3420 ixgbe_setup_vmdq_rss_conf(ixgbe);
3421 }
3422
3423 /*
3424 * Tunable used to force an interrupt type. The only use is
3425 * for testing the lesser interrupt types.
3426 * 0 = don't force interrupt type
3427 * 1 = force interrupt type MSI-X
3428 * 2 = force interrupt type MSI
3429 * 3 = force interrupt type Legacy
3430 */
3431 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3432 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3433
3434 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3435 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3436 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3437 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3438 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3439 0, 1, DEFAULT_LSO_ENABLE);
3440 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3441 0, 1, DEFAULT_LRO_ENABLE);
3442 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3443 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3444 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3445 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3446
3447 /* Head Write Back not recommended for 82599, X540 and X550 */
3448 if (hw->mac.type == ixgbe_mac_82599EB ||
3449 hw->mac.type == ixgbe_mac_X540 ||
3450 hw->mac.type == ixgbe_mac_X550 ||
3451 hw->mac.type == ixgbe_mac_X550EM_x) {
3452 ixgbe->tx_head_wb_enable = B_FALSE;
3453 }
3454
3455 /*
3456 * ixgbe LSO needs the tx h/w checksum support.
3457 * LSO will be disabled if tx h/w checksum is not
3458 * enabled.
3459 */
3460 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3461 ixgbe->lso_enable = B_FALSE;
3462 }
3463
3464 /*
3465 * ixgbe LRO needs the rx h/w checksum support.
3466 * LRO will be disabled if rx h/w checksum is not
3467 * enabled.
3468 */
3469 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3470 ixgbe->lro_enable = B_FALSE;
3471 }
3472
3473 /*
3474 * ixgbe LRO is only supported by 82599, X540 and X550
3475 */
3476 if (hw->mac.type == ixgbe_mac_82598EB) {
3477 ixgbe->lro_enable = B_FALSE;
3478 }
3479 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3480 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3481 DEFAULT_TX_COPY_THRESHOLD);
3482 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3483 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3484 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3485 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3486 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3487 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3488 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3489 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3490 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3491
3492 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3493 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3494 DEFAULT_RX_COPY_THRESHOLD);
3495 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3496 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3497 DEFAULT_RX_LIMIT_PER_INTR);
3498
3499 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3500 ixgbe->capab->min_intr_throttle,
3501 ixgbe->capab->max_intr_throttle,
3502 ixgbe->capab->def_intr_throttle);
3503 /*
3504 * 82599, X540 and X550 require that the interrupt throttling rate
3505 * be a multiple of 8. This is enforced by the register definition.
3506 */
3507 if (hw->mac.type == ixgbe_mac_82599EB ||
3508 hw->mac.type == ixgbe_mac_X540 ||
3509 hw->mac.type == ixgbe_mac_X550 ||
3510 hw->mac.type == ixgbe_mac_X550EM_x)
3511 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3512
3513 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3514 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3515 }
3516
3517 static void
3518 ixgbe_init_params(ixgbe_t *ixgbe)
3519 {
3520 struct ixgbe_hw *hw = &ixgbe->hw;
3521 ixgbe_link_speed speeds_supported = 0;
3522 boolean_t negotiate;
3523
3524 /*
3525 * Get a list of speeds the adapter supports. If the hw struct hasn't
3526 * been populated with this information yet, retrieve it from the
3527 * adapter and save it to our own variable.
3528 *
3529 * On certain adapters, such as ones which use SFPs, the contents of
3530 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not
3531 * updated, so we must rely on calling ixgbe_get_link_capabilities()
3532 * in order to ascertain the speeds which we are capable of supporting,
3533 * and in the case of SFP-equipped adapters, which speed we are
3534 * advertising. If ixgbe_get_link_capabilities() fails for some reason,
3535 * we'll go with a default list of speeds as a last resort.
3536 */
3537 speeds_supported = hw->phy.speeds_supported;
3538
3539 if (speeds_supported == 0) {
3540 if (ixgbe_get_link_capabilities(hw, &speeds_supported,
3541 &negotiate) != IXGBE_SUCCESS) {
3542 if (hw->mac.type == ixgbe_mac_82598EB) {
3543 speeds_supported =
3544 IXGBE_LINK_SPEED_82598_AUTONEG;
3545 } else {
3546 speeds_supported =
3547 IXGBE_LINK_SPEED_82599_AUTONEG;
3548 }
3549 }
3550 }
3551 ixgbe->speeds_supported = speeds_supported;
3552
3553 /*
3554 * By default, all supported speeds are enabled and advertised.
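 * The param_en_* values set below are later folded into the advertised
 * speed list by ixgbe_driver_setup_link().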
3555 */
3556 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) {
3557 ixgbe->param_en_10000fdx_cap = 1;
3558 ixgbe->param_adv_10000fdx_cap = 1;
3559 } else {
3560 ixgbe->param_en_10000fdx_cap = 0;
3561 ixgbe->param_adv_10000fdx_cap = 0;
3562 }
3563
3564 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) {
3565 ixgbe->param_en_5000fdx_cap = 1;
3566 ixgbe->param_adv_5000fdx_cap = 1;
3567 } else {
3568 ixgbe->param_en_5000fdx_cap = 0;
3569 ixgbe->param_adv_5000fdx_cap = 0;
3570 }
3571
3572 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) {
3573 ixgbe->param_en_2500fdx_cap = 1;
3574 ixgbe->param_adv_2500fdx_cap = 1;
3575 } else {
3576 ixgbe->param_en_2500fdx_cap = 0;
3577 ixgbe->param_adv_2500fdx_cap = 0;
3578 }
3579
3580 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) {
3581 ixgbe->param_en_1000fdx_cap = 1;
3582 ixgbe->param_adv_1000fdx_cap = 1;
3583 } else {
3584 ixgbe->param_en_1000fdx_cap = 0;
3585 ixgbe->param_adv_1000fdx_cap = 0;
3586 }
3587
3588 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) {
3589 ixgbe->param_en_100fdx_cap = 1;
3590 ixgbe->param_adv_100fdx_cap = 1;
3591 } else {
3592 ixgbe->param_en_100fdx_cap = 0;
3593 ixgbe->param_adv_100fdx_cap = 0;
3594 }
3595
3596 ixgbe->param_pause_cap = 1;
3597 ixgbe->param_asym_pause_cap = 1;
3598 ixgbe->param_rem_fault = 0;
3599
3600 ixgbe->param_adv_autoneg_cap = 1;
3601 ixgbe->param_adv_pause_cap = 1;
3602 ixgbe->param_adv_asym_pause_cap = 1;
3603 ixgbe->param_adv_rem_fault = 0;
3604
3605 ixgbe->param_lp_10000fdx_cap = 0;
3606 ixgbe->param_lp_5000fdx_cap = 0;
3607 ixgbe->param_lp_2500fdx_cap = 0;
3608 ixgbe->param_lp_1000fdx_cap = 0;
3609 ixgbe->param_lp_100fdx_cap = 0;
3610 ixgbe->param_lp_autoneg_cap = 0;
3611 ixgbe->param_lp_pause_cap = 0;
3612 ixgbe->param_lp_asym_pause_cap = 0;
3613 ixgbe->param_lp_rem_fault = 0;
3614 }
3615
3616 /*
3617 * ixgbe_get_prop - Get a property value out of the configuration file
3618 * ixgbe.conf.
3619 *
3620 * Caller provides the name of the property, a minimum value, a maximum
3621 * value, and a default value.
3622 *
3623 * Return the configured value of the property, with the default, minimum
3624 * and maximum properly applied.
3625 */
3626 static int
3627 ixgbe_get_prop(ixgbe_t *ixgbe,
3628 char *propname, /* name of the property */
3629 int minval, /* minimum acceptable value */
3630 int maxval, /* maximum acceptable value */
3631 int defval) /* default value */
3632 {
3633 int value;
3634
3635 /*
3636 * Call ddi_prop_get_int() to read the conf settings
3637 */
3638 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3639 DDI_PROP_DONTPASS, propname, defval);
3640 if (value > maxval)
3641 value = maxval;
3642
3643 if (value < minval)
3644 value = minval;
3645
3646 return (value);
3647 }
3648
3649 /*
3650 * ixgbe_driver_setup_link - Use the link properties to set up the link.
3651 */
3652 int
3653 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3654 {
3655 struct ixgbe_hw *hw = &ixgbe->hw;
3656 ixgbe_link_speed advertised = 0;
3657
3658 /*
3659 * Assemble a list of enabled speeds to auto-negotiate with.
3660 */ 3661 if (ixgbe->param_en_10000fdx_cap == 1) 3662 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 3663 3664 if (ixgbe->param_en_5000fdx_cap == 1) 3665 advertised |= IXGBE_LINK_SPEED_5GB_FULL; 3666 3667 if (ixgbe->param_en_2500fdx_cap == 1) 3668 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; 3669 3670 if (ixgbe->param_en_1000fdx_cap == 1) 3671 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 3672 3673 if (ixgbe->param_en_100fdx_cap == 1) 3674 advertised |= IXGBE_LINK_SPEED_100_FULL; 3675 3676 /* 3677 * As a last resort, autoneg with a default list of speeds. 3678 */ 3679 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) { 3680 ixgbe_notice(ixgbe, "Invalid link settings. Setting link " 3681 "to autonegotiate with full capabilities."); 3682 3683 if (hw->mac.type == ixgbe_mac_82598EB) 3684 advertised = IXGBE_LINK_SPEED_82598_AUTONEG; 3685 else 3686 advertised = IXGBE_LINK_SPEED_82599_AUTONEG; 3687 } 3688 3689 if (setup_hw) { 3690 if (ixgbe_setup_link(&ixgbe->hw, advertised, 3691 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) { 3692 ixgbe_notice(ixgbe, "Setup link failed on this " 3693 "device."); 3694 return (IXGBE_FAILURE); 3695 } 3696 } 3697 3698 return (IXGBE_SUCCESS); 3699 } 3700 3701 /* 3702 * ixgbe_driver_link_check - Link status processing. 3703 * 3704 * This function can be called in both kernel context and interrupt context 3705 */ 3706 static void 3707 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3708 { 3709 struct ixgbe_hw *hw = &ixgbe->hw; 3710 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3711 boolean_t link_up = B_FALSE; 3712 boolean_t link_changed = B_FALSE; 3713 3714 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3715 3716 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3717 if (link_up) { 3718 ixgbe->link_check_complete = B_TRUE; 3719 3720 /* Link is up, enable flow control settings */ 3721 (void) ixgbe_fc_enable(hw); 3722 3723 /* 3724 * The Link is up, check whether it was marked as down earlier 3725 */ 3726 if (ixgbe->link_state != LINK_STATE_UP) { 3727 switch (speed) { 3728 case IXGBE_LINK_SPEED_10GB_FULL: 3729 ixgbe->link_speed = SPEED_10GB; 3730 break; 3731 case IXGBE_LINK_SPEED_5GB_FULL: 3732 ixgbe->link_speed = SPEED_5GB; 3733 break; 3734 case IXGBE_LINK_SPEED_2_5GB_FULL: 3735 ixgbe->link_speed = SPEED_2_5GB; 3736 break; 3737 case IXGBE_LINK_SPEED_1GB_FULL: 3738 ixgbe->link_speed = SPEED_1GB; 3739 break; 3740 case IXGBE_LINK_SPEED_100_FULL: 3741 ixgbe->link_speed = SPEED_100; 3742 } 3743 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3744 ixgbe->link_state = LINK_STATE_UP; 3745 link_changed = B_TRUE; 3746 } 3747 } else { 3748 if (ixgbe->link_check_complete == B_TRUE || 3749 (ixgbe->link_check_complete == B_FALSE && 3750 gethrtime() >= ixgbe->link_check_hrtime)) { 3751 /* 3752 * The link is really down 3753 */ 3754 ixgbe->link_check_complete = B_TRUE; 3755 3756 if (ixgbe->link_state != LINK_STATE_DOWN) { 3757 ixgbe->link_speed = 0; 3758 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3759 ixgbe->link_state = LINK_STATE_DOWN; 3760 link_changed = B_TRUE; 3761 } 3762 } 3763 } 3764 3765 /* 3766 * If we are in an interrupt context, need to re-enable the 3767 * interrupt, which was automasked 3768 */ 3769 if (servicing_interrupt() != 0) { 3770 ixgbe->eims |= IXGBE_EICR_LSC; 3771 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3772 } 3773 3774 if (link_changed) { 3775 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3776 } 3777 } 3778 3779 /* 3780 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599. 
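 * An SDP1 GPI interrupt reports a link event on the fiber side and an
 * SDP2 GPI interrupt reports an SFP module change; in both cases the
 * module is re-identified and the multispeed fiber link is set up again.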
3781 */
3782 static void
3783 ixgbe_sfp_check(void *arg)
3784 {
3785 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3786 uint32_t eicr = ixgbe->eicr;
3787 struct ixgbe_hw *hw = &ixgbe->hw;
3788
3789 mutex_enter(&ixgbe->gen_lock);
3790 (void) hw->phy.ops.identify_sfp(hw);
3791 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
3792 /* clear the interrupt */
3793 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3794
3795 /* if link up, do multispeed fiber setup */
3796 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3797 B_TRUE);
3798 ixgbe_driver_link_check(ixgbe);
3799 ixgbe_get_hw_state(ixgbe);
3800 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) {
3801 /* clear the interrupt */
3802 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
3803
3804 /* if link up, do sfp module setup */
3805 (void) hw->mac.ops.setup_sfp(hw);
3806
3807 /* do multispeed fiber setup */
3808 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3809 B_TRUE);
3810 ixgbe_driver_link_check(ixgbe);
3811 ixgbe_get_hw_state(ixgbe);
3812 }
3813 mutex_exit(&ixgbe->gen_lock);
3814
3815 /*
3816 * We need to fully re-check the link later.
3817 */
3818 ixgbe->link_check_complete = B_FALSE;
3819 ixgbe->link_check_hrtime = gethrtime() +
3820 (IXGBE_LINK_UP_TIME * 100000000ULL);
3821 }
3822
3823 /*
3824 * ixgbe_overtemp_check - overtemp module processing done in taskq
3825 *
3826 * This routine will only be called on adapters with a temperature sensor.
3827 * The indication of over-temperature can be either an SDP0 interrupt or
3828 * the link status change interrupt.
3829 */
3830 static void
3831 ixgbe_overtemp_check(void *arg)
3832 {
3833 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3834 struct ixgbe_hw *hw = &ixgbe->hw;
3835 uint32_t eicr = ixgbe->eicr;
3836 ixgbe_link_speed speed;
3837 boolean_t link_up;
3838
3839 mutex_enter(&ixgbe->gen_lock);
3840
3841 /* make sure we know current state of link */
3842 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
3843
3844 /* check over-temp condition */
3845 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) ||
3846 (eicr & IXGBE_EICR_LSC)) {
3847 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3848 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3849
3850 /*
3851 * Disable the adapter interrupts
3852 */
3853 ixgbe_disable_adapter_interrupts(ixgbe);
3854
3855 /*
3856 * Disable Rx/Tx units
3857 */
3858 (void) ixgbe_stop_adapter(hw);
3859
3860 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3861 ixgbe_error(ixgbe,
3862 "Problem: Network adapter has been stopped "
3863 "because it has overheated");
3864 ixgbe_error(ixgbe,
3865 "Action: Restart the computer. "
3866 "If the problem persists, power off the system "
3867 "and replace the adapter");
3868 }
3869 }
3870
3871 /* write to clear the interrupt */
3872 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3873
3874 mutex_exit(&ixgbe->gen_lock);
3875 }
3876
3877 /*
3878 * ixgbe_phy_check - taskq to process interrupts from an external PHY
3879 *
3880 * This routine will only be called on adapters with external PHYs
3881 * (such as X550) that may be trying to raise our attention to some event.
3882 * Currently, this is limited to PHY overtemperature and link status
3883 * change (LSC) events, however this may expand to include other things
3884 * in future adapters.
3885 */
3886 static void
3887 ixgbe_phy_check(void *arg)
3888 {
3889 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3890 struct ixgbe_hw *hw = &ixgbe->hw;
3891 int rv;
3892
3893 mutex_enter(&ixgbe->gen_lock);
3894
3895 /*
3896 * X550 baseT PHY overtemp and LSC events are handled here.
3897 *
3898 * If an overtemp event occurs, it will be reflected in the
3899 * return value of phy.ops.handle_lasi() and the common code will
3900 * automatically power off the baseT PHY. This is our cue to trigger
3901 * an FMA event.
3902 *
3903 * If a link status change event occurs, phy.ops.handle_lasi() will
3904 * automatically initiate a link setup between the integrated KR PHY
3905 * and the external X557 PHY to ensure that the link speed between
3906 * them matches the link speed of the baseT link.
3907 */
3908 rv = ixgbe_handle_lasi(hw);
3909
3910 if (rv == IXGBE_ERR_OVERTEMP) {
3911 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3912
3913 /*
3914 * Disable the adapter interrupts
3915 */
3916 ixgbe_disable_adapter_interrupts(ixgbe);
3917
3918 /*
3919 * Disable Rx/Tx units
3920 */
3921 (void) ixgbe_stop_adapter(hw);
3922
3923 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3924 ixgbe_error(ixgbe,
3925 "Problem: Network adapter has been stopped due to an "
3926 "overtemperature event being detected.");
3927 ixgbe_error(ixgbe,
3928 "Action: Shut down or restart the computer. If the issue "
3929 "persists, please take action in accordance with the "
3930 "recommendations from your system vendor.");
3931 }
3932
3933 mutex_exit(&ixgbe->gen_lock);
3934 }
3935
3936 /*
3937 * ixgbe_link_timer - timer for link status detection
3938 */
3939 static void
3940 ixgbe_link_timer(void *arg)
3941 {
3942 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3943
3944 mutex_enter(&ixgbe->gen_lock);
3945 ixgbe_driver_link_check(ixgbe);
3946 mutex_exit(&ixgbe->gen_lock);
3947 }
3948
3949 /*
3950 * ixgbe_local_timer - Driver watchdog function.
3951 *
3952 * This function will handle the transmit stall check and other routines.
3953 */
3954 static void
3955 ixgbe_local_timer(void *arg)
3956 {
3957 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3958
3959 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3960 goto out;
3961
3962 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3963 ixgbe->reset_count++;
3964 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3965 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3966 goto out;
3967 }
3968
3969 if (ixgbe_stall_check(ixgbe)) {
3970 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3971 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3972
3973 ixgbe->reset_count++;
3974 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3975 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3976 }
3977
3978 out:
3979 ixgbe_restart_watchdog_timer(ixgbe);
3980 }
3981
3982 /*
3983 * ixgbe_stall_check - Check for transmit stall.
3984 *
3985 * This function checks if the adapter is stalled (in transmit).
3986 *
3987 * It is called each time the watchdog timeout is invoked.
3988 * If the transmit descriptor reclaim continuously fails,
3989 * the watchdog value will increment by 1. If the watchdog
3990 * value exceeds the threshold, the ixgbe is assumed to
3991 * have stalled and needs to be reset.
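 * With the one-second watchdog interval used by this driver, a ring
 * must therefore fail to recycle descriptors for STALL_WATCHDOG_TIMEOUT
 * consecutive seconds before a reset is triggered.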
 */
static boolean_t
ixgbe_stall_check(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	boolean_t result;
	int i;

	if (ixgbe->link_state != LINK_STATE_UP)
		return (B_FALSE);

	/*
	 * If any tx ring is stalled, we'll reset the chipset
	 */
	result = B_FALSE;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
			tx_ring->tx_recycle(tx_ring);
		}

		if (tx_ring->recycle_fail > 0)
			tx_ring->stall_watchdog++;
		else
			tx_ring->stall_watchdog = 0;

		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
			result = B_TRUE;
			break;
		}
	}

	if (result) {
		tx_ring->stall_watchdog = 0;
		tx_ring->recycle_fail = 0;
	}

	return (result);
}

/*
 * is_valid_mac_addr - Check if the mac address is valid.
 */
static boolean_t
is_valid_mac_addr(uint8_t *mac_addr)
{
	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
	const uint8_t addr_test2[6] =
	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
		return (B_FALSE);

	return (B_TRUE);
}

static boolean_t
ixgbe_find_mac_address(ixgbe_t *ixgbe)
{
#ifdef __sparc
	struct ixgbe_hw *hw = &ixgbe->hw;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it). If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as illumos takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	if (found) {
		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
		return (B_TRUE);
	}
#else
	_NOTE(ARGUNUSED(ixgbe));
#endif

	return (B_TRUE);
}

#pragma inline(ixgbe_arm_watchdog_timer)
static void
ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
{
	/*
	 * Fire a watchdog timer
	 */
	ixgbe->watchdog_tid =
	    timeout(ixgbe_local_timer,
	    (void *)ixgbe, 1 * drv_usectohz(1000000));
}

/*
 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
 */
void
ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	if (!ixgbe->watchdog_enable) {
		ixgbe->watchdog_enable = B_TRUE;
		ixgbe->watchdog_start = B_TRUE;
		ixgbe_arm_watchdog_timer(ixgbe);
	}

	mutex_exit(&ixgbe->watchdog_lock);
}

/*
 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
 */
void
ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
{
	timeout_id_t tid;

	mutex_enter(&ixgbe->watchdog_lock);

	ixgbe->watchdog_enable = B_FALSE;
	ixgbe->watchdog_start = B_FALSE;
	tid = ixgbe->watchdog_tid;
	ixgbe->watchdog_tid = 0;

	mutex_exit(&ixgbe->watchdog_lock);

	if (tid != 0)
		(void) untimeout(tid);
}

/*
 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
 */
void
ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	if (ixgbe->watchdog_enable) {
		if (!ixgbe->watchdog_start) {
			ixgbe->watchdog_start = B_TRUE;
			ixgbe_arm_watchdog_timer(ixgbe);
		}
	}

	mutex_exit(&ixgbe->watchdog_lock);
}

/*
 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
 */
static void
ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	if (ixgbe->watchdog_start)
		ixgbe_arm_watchdog_timer(ixgbe);

	mutex_exit(&ixgbe->watchdog_lock);
}

/*
 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
 */
void
ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
{
	timeout_id_t tid;

	mutex_enter(&ixgbe->watchdog_lock);

	ixgbe->watchdog_start = B_FALSE;
	tid = ixgbe->watchdog_tid;
	ixgbe->watchdog_tid = 0;

	mutex_exit(&ixgbe->watchdog_lock);

	if (tid != 0)
		(void) untimeout(tid);
}

/*
 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
 */
static void
ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	/*
	 * mask all interrupts off
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);

	/*
	 * for MSI-X, also disable autoclear
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
	}

	IXGBE_WRITE_FLUSH(hw);
}

/*
 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
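 *
 * A brief map of the registers written below: EIMS sets (unmasks) bits
 * in the interrupt mask, EIAC selects which causes auto-clear on EICR
 * read, EIAM selects which causes are auto-masked when they fire, and
 * GPIE carries general-purpose controls such as MSI-X mode and EIAME
 * (extended interrupt auto-mask enable).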
 */
static void
ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eiac, eiam;
	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* interrupt types to enable */
	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */

	/* enable automask on "other" causes that this adapter can generate */
	eiam = ixgbe->capab->other_intr;

	/*
	 * msi-x mode
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		/* enable autoclear but not on bits 29:20 */
		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);

		/* general purpose interrupt enable */
		gpie |= (IXGBE_GPIE_MSIX_MODE
		    | IXGBE_GPIE_PBA_SUPPORT
		    | IXGBE_GPIE_OCD
		    | IXGBE_GPIE_EIAME);
	/*
	 * non-msi-x mode
	 */
	} else {

		/* disable autoclear, leave gpie at default */
		eiac = 0;

		/*
		 * General purpose interrupt enable.
		 * For 82599, X540 and X550, extended interrupt
		 * automask enable only in MSI or MSI-X mode
		 */
		if ((hw->mac.type == ixgbe_mac_82598EB) ||
		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
			gpie |= IXGBE_GPIE_EIAME;
		}
	}

	/* Enable specific "other" interrupt types */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		gpie |= ixgbe->capab->other_gpie;
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		gpie |= ixgbe->capab->other_gpie;

		/* Enable RSC Delay 8us when LRO enabled */
		if (ixgbe->lro_enable) {
			gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
		}
		break;

	default:
		break;
	}

	/* write to interrupt control registers */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * ixgbe_loopback_ioctl - Loopback support.
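 *
 * Callers probe in three steps: LB_GET_INFO_SIZE to learn how large
 * the property array is, LB_GET_INFO to fetch the supported
 * lb_property_t entries (normal, internal MAC, external), and then
 * LB_GET_MODE/LB_SET_MODE to read or change the active mode. Each
 * request is validated against ioc_count before it is honored.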
 */
enum ioc_reply
ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	uint32_t size;
	uint32_t value;

	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	switch (iocp->ioc_cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		size = sizeof (lb_info_sz_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbsp = value;
		break;

	case LB_GET_INFO:
		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		size = value;
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = 0;
		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;

		lbpp[value++] = lb_normal;
		lbpp[value++] = lb_mac;
		lbpp[value++] = lb_external;
		break;

	case LB_GET_MODE:
		size = sizeof (uint32_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbmp = ixgbe->loopback_mode;
		break;

	case LB_SET_MODE:
		size = 0;
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
			return (IOC_INVAL);
		break;
	}

	iocp->ioc_count = size;
	iocp->ioc_error = 0;

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (IOC_INVAL);
	}

	return (IOC_REPLY);
}

/*
 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
 */
static boolean_t
ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
{
	if (mode == ixgbe->loopback_mode)
		return (B_TRUE);

	ixgbe->loopback_mode = mode;

	if (mode == IXGBE_LB_NONE) {
		/*
		 * Reset the chip
		 */
		(void) ixgbe_reset(ixgbe);
		return (B_TRUE);
	}

	mutex_enter(&ixgbe->gen_lock);

	switch (mode) {
	default:
		mutex_exit(&ixgbe->gen_lock);
		return (B_FALSE);

	case IXGBE_LB_EXTERNAL:
		break;

	case IXGBE_LB_INTERNAL_MAC:
		ixgbe_set_internal_mac_loopback(ixgbe);
		break;
	}

	mutex_exit(&ixgbe->gen_lock);

	return (B_TRUE);
}

/*
 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
 */
static void
ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw;
	uint32_t reg;
	uint8_t atlas;

	hw = &ixgbe->hw;

	/*
	 * Setup MAC loopback
	 */
	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
	reg |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);

	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
	reg &= ~IXGBE_AUTOC_LMS_MASK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

	/*
	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    atlas);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
		reg |= (IXGBE_AUTOC_FLU |
		    IXGBE_AUTOC_10G_KX4);
		IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

		(void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
		    B_FALSE);
		break;

	default:
		break;
	}
}

#pragma inline(ixgbe_intr_rx_work)
/*
 * ixgbe_intr_rx_work - RX processing of ISR.
 */
static void
ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
{
	mblk_t *mp;

	mutex_enter(&rx_ring->rx_lock);

	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
	mutex_exit(&rx_ring->rx_lock);

	if (mp != NULL)
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
}

#pragma inline(ixgbe_intr_tx_work)
/*
 * ixgbe_intr_tx_work - TX processing of ISR.
 */
static void
ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	/*
	 * Recycle the tx descriptors
	 */
	tx_ring->tx_recycle(tx_ring);

	/*
	 * Schedule the re-transmit
	 */
	if (tx_ring->reschedule &&
	    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
		    tx_ring->ring_handle);
		tx_ring->stat_reschedule++;
	}
}

#pragma inline(ixgbe_intr_other_work)
/*
 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
 */
static void
ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * handle link status change
	 */
	if (eicr & IXGBE_EICR_LSC) {
		ixgbe_driver_link_check(ixgbe);
		ixgbe_get_hw_state(ixgbe);
	}

	/*
	 * check for fan failure on adapters with fans
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);

		/*
		 * Disable the adapter interrupts
		 */
		ixgbe_disable_adapter_interrupts(ixgbe);

		/*
		 * Disable Rx/Tx units
		 */
		(void) ixgbe_stop_adapter(&ixgbe->hw);

		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		ixgbe_error(ixgbe,
		    "Problem: Network adapter has been stopped "
		    "because the fan has stopped.\n");
		ixgbe_error(ixgbe,
		    "Action: Replace the adapter.\n");

		/* re-enable the interrupt, which was automasked */
		ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
	}

	/*
	 * Do SFP check for adapters with hot-plug capability
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) ||
	    (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
		    ixgbe_sfp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for SFP check");
		}
	}

	/*
	 * Do over-temperature check for adapters with temp sensor
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) ||
	    (eicr & IXGBE_EICR_LSC))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
		    ixgbe_overtemp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for overtemp check");
		}
	}

	/*
	 * Process an external PHY interrupt
	 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->phy_taskq,
		    ixgbe_phy_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for PHY check");
		}
	}
}

/*
 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
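 *
 * Because a fixed (INTx) interrupt may be shared with other devices,
 * the handler reads EICR and claims the interrupt only if at least one
 * cause bit is set; otherwise it returns DDI_INTR_UNCLAIMED so the
 * framework can offer the interrupt to other consumers.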
 */
static uint_t
ixgbe_intr_legacy(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	ixgbe_rx_ring_t *rx_ring;
	uint32_t eicr;
	mblk_t *mp;
	boolean_t tx_reschedule;
	uint_t result;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	mp = NULL;
	tx_reschedule = B_FALSE;

	/*
	 * Any bit set in eicr: claim this interrupt
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		mutex_exit(&ixgbe->gen_lock);
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	if (eicr) {
		/*
		 * For legacy interrupt, we have only one interrupt,
		 * so we have only one rx ring and one tx ring enabled.
		 */
		ASSERT(ixgbe->num_rx_rings == 1);
		ASSERT(ixgbe->num_tx_rings == 1);

		/*
		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
		 */
		if (eicr & 0x1) {
			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
			/*
			 * Clean the rx descriptors
			 */
			rx_ring = &ixgbe->rx_rings[0];
			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
		}

		/*
		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
		 */
		if (eicr & 0x2) {
			/*
			 * Recycle the tx descriptors
			 */
			tx_ring = &ixgbe->tx_rings[0];
			tx_ring->tx_recycle(tx_ring);

			/*
			 * Schedule the re-transmit
			 */
			tx_reschedule = (tx_ring->reschedule &&
			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
		}

		/* any interrupt type other than tx/rx */
		if (eicr & ixgbe->capab->other_intr) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
			case ixgbe_mac_X550:
			case ixgbe_mac_X550EM_x:
				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
				break;

			default:
				break;
			}
			ixgbe_intr_other_work(ixgbe, eicr);
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}

		mutex_exit(&ixgbe->gen_lock);

		result = DDI_INTR_CLAIMED;
	} else {
		mutex_exit(&ixgbe->gen_lock);

		/*
		 * No interrupt cause bits set: don't claim this interrupt.
		 */
		result = DDI_INTR_UNCLAIMED;
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	/*
	 * Do the following work outside of the gen_lock
	 */
	if (mp != NULL) {
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
	}

	if (tx_reschedule) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
		tx_ring->stat_reschedule++;
	}

	return (result);
}

/*
 * ixgbe_intr_msi - Interrupt handler for MSI.
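 *
 * MSI gives the driver a single dedicated vector, so rx ring 0 (EICR
 * bit 0), tx ring 0 (EICR bit 1) and all "other" causes share it; EICR
 * is read once and dispatched bit by bit below.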
 */
static uint_t
ixgbe_intr_msi(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;

	_NOTE(ARGUNUSED(arg2));

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * For MSI interrupt, we have only one vector,
	 * so we have only one rx ring and one tx ring enabled.
	 */
	ASSERT(ixgbe->num_rx_rings == 1);
	ASSERT(ixgbe->num_tx_rings == 1);

	/*
	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
	 */
	if (eicr & 0x1) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
	}

	/*
	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
	 */
	if (eicr & 0x2) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
	}

	/* any interrupt type other than tx/rx */
	if (eicr & ixgbe->capab->other_intr) {
		mutex_enter(&ixgbe->gen_lock);
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			break;

		default:
			break;
		}
		ixgbe_intr_other_work(ixgbe, eicr);
		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		mutex_exit(&ixgbe->gen_lock);
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	return (DDI_INTR_CLAIMED);
}

/*
 * ixgbe_intr_msix - Interrupt handler for MSI-X.
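 *
 * Each MSI-X vector carries a private ixgbe_intr_vector_t whose
 * rx_map/tx_map/other_map bitmaps (built by
 * ixgbe_map_intrs_to_vectors()) name the rings it services, so the
 * handler only walks the rings mapped to this vector.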
 */
static uint_t
ixgbe_intr_msix(void *arg1, void *arg2)
{
	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
	ixgbe_t *ixgbe = vect->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;
	int r_idx = 0;

	_NOTE(ARGUNUSED(arg2));

	/*
	 * Clean each rx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
		    (ixgbe->num_rx_rings - 1));
	}

	/*
	 * Clean each tx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
		    (ixgbe->num_tx_rings - 1));
	}

	/*
	 * Clean other interrupt (link change) that has its bit set in the map
	 */
	if (BT_TEST(vect->other_map, 0) == 1) {
		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip,
			    DDI_SERVICE_DEGRADED);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Check "other" cause bits: any interrupt type other
		 * than tx/rx
		 */
		if (eicr & ixgbe->capab->other_intr) {
			mutex_enter(&ixgbe->gen_lock);
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
			case ixgbe_mac_X550:
			case ixgbe_mac_X550EM_x:
				ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			default:
				break;
			}
			mutex_exit(&ixgbe->gen_lock);
		}

		/* re-enable the interrupts which were automasked */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
 *
 * Normal sequence is to try MSI-X; if not successful, try MSI;
 * if not successful, try Legacy.
 * ixgbe->intr_force can be used to force the sequence to start with
 * any of the 3 types.
 * If MSI-X is not used, the number of tx/rx rings is forced to 1.
 */
static int
ixgbe_alloc_intrs(ixgbe_t *ixgbe)
{
	dev_info_t *devinfo;
	int intr_types;
	int rc;

	devinfo = ixgbe->dip;

	/*
	 * Get supported interrupt types
	 */
	rc = ddi_intr_get_supported_types(devinfo, &intr_types);

	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get supported interrupt types failed: %d", rc);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);

	ixgbe->intr_type = 0;

	/*
	 * Install MSI-X interrupts
	 */
	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
		if (rc == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate MSI-X failed, trying MSI interrupts...");
	}

	/*
	 * MSI-X not used, force rings and groups to 1
	 */
	ixgbe->num_rx_rings = 1;
	ixgbe->num_rx_groups = 1;
	ixgbe->num_tx_rings = 1;
	ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
	ixgbe_log(ixgbe,
	    "MSI-X not used, force rings and groups number to 1");

	/*
	 * Install MSI interrupts
	 */
	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
		if (rc == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate MSI failed, trying Legacy interrupts...");
	}

	/*
	 * Install legacy interrupts
	 */
	if (intr_types & DDI_INTR_TYPE_FIXED) {
		/*
		 * Disallow legacy interrupts for X550. X550 has a silicon
		 * bug which prevents Shared Legacy interrupts from working.
		 * For details, please reference:
		 *
		 * Intel Ethernet Controller X550 Specification Update rev. 2.1
		 * May 2016, erratum 22: PCIe Interrupt Status Bit
		 */
		if (ixgbe->hw.mac.type == ixgbe_mac_X550 ||
		    ixgbe->hw.mac.type == ixgbe_mac_X550EM_x ||
		    ixgbe->hw.mac.type == ixgbe_mac_X550_vf ||
		    ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) {
			ixgbe_log(ixgbe,
			    "Legacy interrupts are not supported on this "
			    "adapter. Please use MSI or MSI-X instead.");
			return (IXGBE_FAILURE);
		}
		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
		if (rc == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate Legacy interrupts failed");
	}

	/*
	 * If none of the 3 types succeeded, return failure
	 */
	return (IXGBE_FAILURE);
}

/*
 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
 *
 * For legacy and MSI, only 1 handle is needed. For MSI-X,
 * if fewer handles are available than the minimum required,
 * return failure. Upon success, this maps the vectors to rx
 * and tx rings for interrupts.
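 *
 * An illustrative sizing example (assuming an adapter whose
 * max_ring_vect is at least 16): with 8 rx and 8 tx rings the MSI-X
 * request below is min(16, 8 + 8) = 16 handles; the DDI may still
 * grant fewer, and the actual count is what gets mapped to the rings.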
 */
static int
ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
{
	dev_info_t *devinfo;
	int request, count, actual;
	int minimum;
	int rc;
	uint32_t ring_per_group;

	devinfo = ixgbe->dip;

	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
		request = 1;	/* Request 1 legacy interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
		break;

	case DDI_INTR_TYPE_MSI:
		request = 1;	/* Request 1 MSI interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
		break;

	case DDI_INTR_TYPE_MSIX:
		/*
		 * Best number of vectors for the adapter is
		 * (# rx rings + # tx rings), however we will
		 * limit the request number.
		 */
		request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
		if (request > ixgbe->capab->max_ring_vect)
			request = ixgbe->capab->max_ring_vect;
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
		break;

	default:
		ixgbe_log(ixgbe,
		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
		    intr_type);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
	    request, minimum);

	/*
	 * Get number of supported interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt number failed. Return: %d, count: %d",
		    rc, count);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);

	actual = 0;
	ixgbe->intr_cnt = 0;
	ixgbe->intr_cnt_max = 0;
	ixgbe->intr_cnt_min = 0;

	/*
	 * Allocate an array of interrupt handles
	 */
	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
	    request, &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Allocate interrupts failed. "
		    "return: %d, request: %d, actual: %d",
		    rc, request, actual);
		goto alloc_handle_fail;
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);

	/*
	 * upper/lower limit of interrupts
	 */
	ixgbe->intr_cnt = actual;
	ixgbe->intr_cnt_max = request;
	ixgbe->intr_cnt_min = minimum;

	/*
	 * The number of rss rings per group should not exceed the number of
	 * rx interrupts; otherwise the rx ring count needs to be adjusted.
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
	if (actual < ring_per_group) {
		ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
		ixgbe_setup_vmdq_rss_conf(ixgbe);
	}

	/*
	 * Now we know the actual number of vectors. Here we map the vector
	 * to other, rx rings and tx ring.
	 */
	if (actual < minimum) {
		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
		    actual);
		goto alloc_handle_fail;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt cap failed: %d", rc);
		goto alloc_handle_fail;
	}

	ixgbe->intr_type = intr_type;

	return (IXGBE_SUCCESS);

alloc_handle_fail:
	ixgbe_rem_intrs(ixgbe);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt
 * type.
 *
 * Before adding the interrupt handlers, the interrupt vectors have
 * been allocated, and the rx/tx rings have also been allocated.
 */
static int
ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
{
	int vector = 0;
	int rc;

	switch (ixgbe->intr_type) {
	case DDI_INTR_TYPE_MSIX:
		/*
		 * Add interrupt handler for all vectors
		 */
		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
			/*
			 * install pointer to vect_map[vector]
			 */
			rc = ddi_intr_add_handler(ixgbe->htable[vector],
			    (ddi_intr_handler_t *)ixgbe_intr_msix,
			    (void *)&ixgbe->vect_map[vector], NULL);

			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Add interrupt handler failed. "
				    "return: %d, vector: %d", rc, vector);
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    ixgbe->htable[vector]);
				}
				return (IXGBE_FAILURE);
			}
		}

		break;

	case DDI_INTR_TYPE_MSI:
		/*
		 * Add interrupt handlers for the only vector
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_msi,
		    (void *)ixgbe, NULL);

		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add MSI interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}

		break;

	case DDI_INTR_TYPE_FIXED:
		/*
		 * Add interrupt handlers for the only vector
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
		    (void *)ixgbe, NULL);

		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add legacy interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}

		break;

	default:
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

#pragma inline(ixgbe_map_rxring_to_vector)
/*
 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
 */
static void
ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
{
	/*
	 * Set bit in map
	 */
	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);

	/*
	 * Count bits set
	 */
	ixgbe->vect_map[v_idx].rxr_cnt++;

	/*
	 * Remember bit position
	 */
	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
}

#pragma inline(ixgbe_map_txring_to_vector)
/*
 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
 */
static void
ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
{
	/*
	 * Set bit in map
	 */
	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);

	/*
	 * Count bits set
	 */
	ixgbe->vect_map[v_idx].txr_cnt++;

	/*
	 * Remember bit position
	 */
	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
}

/*
 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
 * allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
    int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		if (cause == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}

/*
 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
 * given interrupt vector allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		if (cause == -1) {
			/* other causes */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}

/*
 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
 * given interrupt vector allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		if (cause == -1) {
			/* other causes */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}

/*
 * Convert the sw rx ring index maintained by the driver to the
 * corresponding hw rx ring index.
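 *
 * Two examples derived from the cases below: in VMDq mode on an 82599,
 * sw ring 3 maps to hw ring 6 (sw_rx_index * 2); in VMDq+RSS mode on
 * an 82598 with 4 rings per group, sw ring 5 maps to hw ring 17
 * (group 1 * 16 + offset 1).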
 */
static uint32_t
ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t rx_ring_per_group, hw_rx_index;

	if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
	    ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
		return (sw_rx_index);
	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			return (sw_rx_index);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			return (sw_rx_index * 2);

		default:
			break;
		}
	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
		rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			hw_rx_index = (sw_rx_index / rx_ring_per_group) *
			    16 + (sw_rx_index % rx_ring_per_group);
			return (hw_rx_index);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			if (ixgbe->num_rx_groups > 32) {
				hw_rx_index = (sw_rx_index /
				    rx_ring_per_group) * 2 +
				    (sw_rx_index % rx_ring_per_group);
			} else {
				hw_rx_index = (sw_rx_index /
				    rx_ring_per_group) * 4 +
				    (sw_rx_index % rx_ring_per_group);
			}
			return (hw_rx_index);

		default:
			break;
		}
	}

	/*
	 * Should never be reached. Just to make the compiler happy.
	 */
	return (sw_rx_index);
}

/*
 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
 *
 * For MSI-X, the rx, tx and other interrupts are mapped to
 * vectors [0 .. intr_cnt - 1].
 */
static int
ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
{
	int i, vector = 0;

	/* initialize vector map */
	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		ixgbe->vect_map[i].ixgbe = ixgbe;
	}

	/*
	 * The non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
	 * tx rings[0] on RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
		return (IXGBE_SUCCESS);
	}

	/*
	 * Interrupts/vectors mapping for MSI-X
	 */

	/*
	 * Map other interrupt to vector 0,
	 * Set bit in map and count the bits set.
	 */
	BT_SET(ixgbe->vect_map[vector].other_map, 0);
	ixgbe->vect_map[vector].other_cnt++;

	/*
	 * Map rx ring interrupts to vectors
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
		vector = (vector + 1) % ixgbe->intr_cnt;
	}

	/*
	 * Map tx ring interrupts to vectors
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ixgbe_map_txring_to_vector(ixgbe, i, vector);
		vector = (vector + 1) % ixgbe->intr_cnt;
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
 *
 * This relies on the ring/vector mapping already set up in the
 * vect_map[] structures.
 */
static void
ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_intr_vector_t *vect;	/* vector bitmap */
	int r_idx;	/* ring index */
	int v_idx;	/* vector index */
	uint32_t hw_index;

	/*
	 * Clear any previous entries
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		for (v_idx = 0; v_idx < 25; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		for (v_idx = 0; v_idx < 64; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
		break;

	default:
		break;
	}

	/*
	 * For non-MSI-X interrupts, rx rings[0] will use RTxQ[0], and
	 * tx rings[0] will use RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
		return;
	}

	/*
	 * For MSI-X interrupt, "Other" is always on vector[0].
	 */
	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);

	/*
	 * For each interrupt vector, populate the IVAR table
	 */
	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
		vect = &ixgbe->vect_map[v_idx];

		/*
		 * For each rx ring bit set
		 */
		r_idx = bt_getlowbit(vect->rx_map, 0,
		    (ixgbe->num_rx_rings - 1));

		while (r_idx >= 0) {
			hw_index = ixgbe->rx_rings[r_idx].hw_index;
			ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
			    (ixgbe->num_rx_rings - 1));
		}

		/*
		 * For each tx ring bit set
		 */
		r_idx = bt_getlowbit(vect->tx_map, 0,
		    (ixgbe->num_tx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
			    (ixgbe->num_tx_rings - 1));
		}
	}
}

/*
 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
 */
static void
ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	for (i = 0; i < ixgbe->intr_cnt; i++) {
		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
		if (rc != DDI_SUCCESS) {
			IXGBE_DEBUGLOG_1(ixgbe,
			    "Remove intr handler failed: %d", rc);
		}
	}
}

/*
 * ixgbe_rem_intrs - Remove the allocated interrupts.
 */
static void
ixgbe_rem_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	for (i = 0; i < ixgbe->intr_cnt; i++) {
		rc = ddi_intr_free(ixgbe->htable[i]);
		if (rc != DDI_SUCCESS) {
			IXGBE_DEBUGLOG_1(ixgbe,
			    "Free intr failed: %d", rc);
		}
	}

	kmem_free(ixgbe->htable, ixgbe->intr_size);
	ixgbe->htable = NULL;
}

/*
 * ixgbe_enable_intrs - Enable all the ddi interrupts.
 */
static int
ixgbe_enable_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	/*
	 * Enable interrupts
	 */
	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/*
		 * Call ddi_intr_block_enable() for MSI
		 */
		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Enable block intr failed: %d", rc);
			return (IXGBE_FAILURE);
		}
	} else {
		/*
		 * Call ddi_intr_enable() for Legacy/MSI non block enable
		 */
		for (i = 0; i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_enable(ixgbe->htable[i]);
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Enable intr failed: %d", rc);
				return (IXGBE_FAILURE);
			}
		}
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_disable_intrs - Disable all the interrupts.
 */
static int
ixgbe_disable_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	/*
	 * Disable all interrupts
	 */
	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Disable block intr failed: %d", rc);
			return (IXGBE_FAILURE);
		}
	} else {
		for (i = 0; i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_disable(ixgbe->htable[i]);
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Disable intr failed: %d", rc);
				return (IXGBE_FAILURE);
			}
		}
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
 */
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = 0;
	boolean_t link_up = B_FALSE;
	uint32_t pcs1g_anlp = 0;

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;

	/* check for link, don't wait */
	(void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);

	/*
	 * Update the observed Link Partner's capabilities. Not all adapters
	 * can provide full information on the LP's capable speeds, so we
	 * provide what we can.
	 */
	if (link_up) {
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	/*
	 * Update GLD's notion of the adapter's currently advertised speeds.
	 * Since the common code doesn't always record the current
	 * autonegotiation settings in the phy struct for all parts
	 * (specifically, adapters with SFPs) we first test to see if it is 0,
	 * and if so, we fall back to using the adapter's speed capabilities
	 * which we saved during instance init in ixgbe_init_params().
	 *
	 * Adapters with SFPs will always be shown as advertising all of their
	 * supported speeds, and adapters with baseT PHYs (where the phy struct
	 * is maintained by the common code) will always have a factual view of
	 * their currently-advertised speeds. In the case of SFPs, this is
	 * acceptable as we default to advertising all speeds that the adapter
	 * claims to support, and those properties are immutable; unlike on
	 * baseT (copper) PHYs, where speeds can be enabled or disabled at
	 * will.
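	 *
	 * Concretely (an illustrative case, not an exhaustive one): if
	 * phy.autoneg_advertised reads back as 0 on an SFP+ part whose
	 * saved speeds_supported mask is 10Gb|1Gb, the properties below
	 * are derived from that saved mask, so GLD still sees 10Gfdx
	 * and 1Gfdx as advertised.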
	 */
	speed = hw->phy.autoneg_advertised;
	if (speed == 0)
		speed = ixgbe->speeds_supported;

	ixgbe->param_adv_10000fdx_cap =
	    (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0;
	ixgbe->param_adv_5000fdx_cap =
	    (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0;
	ixgbe->param_adv_2500fdx_cap =
	    (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0;
	ixgbe->param_adv_1000fdx_cap =
	    (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0;
	ixgbe->param_adv_100fdx_cap =
	    (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0;
}

/*
 * ixgbe_get_driver_control - Notify that driver is in control of device.
 */
static void
ixgbe_get_driver_control(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;

	/*
	 * Notify firmware that driver is in control of device
	 */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}

/*
 * ixgbe_release_driver_control - Notify that driver is no longer in control
 * of device.
 */
static void
ixgbe_release_driver_control(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;

	/*
	 * Notify firmware that driver is no longer in control of device
	 */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}

/*
 * ixgbe_atomic_reserve - Atomic decrease operation.
 */
int
ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
{
	uint32_t oldval;
	uint32_t newval;

	/*
	 * ATOMICALLY
	 */
	do {
		oldval = *count_p;
		if (oldval < n)
			return (-1);
		newval = oldval - n;
	} while (atomic_cas_32(count_p, oldval, newval) != oldval);

	return (newval);
}

/*
 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
 */
static uint8_t *
ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
{
	uint8_t *addr = *upd_ptr;
	uint8_t *new_ptr;

	_NOTE(ARGUNUSED(hw));
	_NOTE(ARGUNUSED(vmdq));

	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*upd_ptr = new_ptr;
	return (addr);
}

/*
 * FMA support
 */
int
ixgbe_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

int
ixgbe_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

/*
 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
 */
static int
ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	_NOTE(ARGUNUSED(impl_data));
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static void
ixgbe_fm_init(ixgbe_t *ixgbe)
{
	ddi_iblock_cookie_t iblk;
	int fma_dma_flag;

	/*
	 * Only register with IO Fault Services if we have some capability
	 */
	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	ixgbe_set_fma_flags(fma_dma_flag);

	if (ixgbe->fm_capabilities) {

		/*
		 * Register capabilities with IO Fault Services
		 */
		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_setup(ixgbe->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_register(ixgbe->dip,
			    ixgbe_fm_error_cb, (void *)ixgbe);
	}
}

static void
ixgbe_fm_fini(ixgbe_t *ixgbe)
{
	/*
	 * Only unregister FMA capabilities if they are registered
	 */
	if (ixgbe->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_teardown(ixgbe->dip);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_unregister(ixgbe->dip);

		/*
		 * Unregister from IO Fault Service
		 */
		ddi_fm_fini(ixgbe->dip);
	}
}

void
ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}

static int
ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(&rx_ring->rx_lock);
	return (0);
}

/*
 * Get the global ring index by a ring index within a group.
 */
static int
ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
{
	ixgbe_rx_ring_t *rx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		if (rx_ring->group_index == gindex)
			rindex--;
		if (rindex < 0)
			return (i);
	}

	return (-1);
}

/*
 * Callback function for MAC layer to register all rings.
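 *
 * For an rx ring this hands mac(9E) the ring's driver handle plus its
 * start (ixgbe_ring_start), poll (ixgbe_ring_rx_poll) and stat entry
 * points along with the interrupt enable/disable hooks; for a tx ring
 * it registers the transmit (ixgbe_ring_tx) and stat entry points.
 * With MSI/MSI-X the underlying DDI interrupt handle is exported too.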
 */
/* ARGSUSED */
void
ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		/*
		 * 'index' is the ring index within the group.
		 * Need to get the global ring index by searching in groups.
		 */
		int global_ring_index = ixgbe_get_rx_ring_index(
		    ixgbe, group_index, ring_index);

		ASSERT(global_ring_index >= 0);

		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = ixgbe_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = ixgbe_ring_rx_poll;
		infop->mri_stat = ixgbe_rx_ring_stat;

		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    ixgbe->htable[rx_ring->intr_vector];
		}

		break;
	}
	case MAC_RING_TYPE_TX: {
		ASSERT(group_index == -1);
		ASSERT(ring_index < ixgbe->num_tx_rings);

		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = ixgbe_ring_tx;
		infop->mri_stat = ixgbe_tx_ring_stat;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    ixgbe->htable[tx_ring->intr_vector];
		}
		break;
	}
	default:
		break;
	}
}

/*
 * Callback function for MAC layer to register all groups.
 */
void
ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		ixgbe_rx_group_t *rx_group;

		rx_group = &ixgbe->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = ixgbe_addmac;
		infop->mgi_remmac = ixgbe_remmac;
		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		break;
	default:
		break;
	}
}

/*
 * Enable the interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * Interrupts are being adjusted. ixgbe_intr_adjust()
		 * will eventually re-enable the interrupt when it's
		 * done with the adjustment.
		 */
		return (0);
	}

	/*
	 * Enable the interrupt by setting the VAL bit of the given interrupt
	 * vector allocation register (IVAR).

        /*
         * Enable the interrupt by setting the VAL bit of the given
         * interrupt vector allocation register (IVAR).
         */
        ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);

        BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);

        /*
         * Trigger a Rx interrupt on this ring
         */
        IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
        IXGBE_WRITE_FLUSH(&ixgbe->hw);

        mutex_exit(&ixgbe->gen_lock);

        return (0);
}

/*
 * Disable the interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
        ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
        ixgbe_t *ixgbe = rx_ring->ixgbe;
        int r_idx = rx_ring->index;
        int hw_r_idx = rx_ring->hw_index;
        int v_idx = rx_ring->intr_vector;

        mutex_enter(&ixgbe->gen_lock);
        if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
                mutex_exit(&ixgbe->gen_lock);
                /*
                 * Simply return 0.
                 * In the rare case where an interrupt is being
                 * disabled while interrupts are being adjusted,
                 * we don't fail the operation. No interrupts will
                 * be generated while they are adjusted, and
                 * ixgbe_intr_adjust() will cause the interrupts
                 * to be re-enabled once it completes. Note that
                 * in this case, packets may be delivered to the
                 * stack via interrupts before ixgbe_rx_ring_intr_enable()
                 * is called again. This is acceptable since interrupt
                 * adjustment is infrequent, and the stack will be
                 * able to handle these packets.
                 */
                return (0);
        }

        /*
         * Disable the interrupt by clearing the VAL bit of the given
         * interrupt vector allocation register (IVAR).
         */
        ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);

        BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);

        mutex_exit(&ixgbe->gen_lock);

        return (0);
}

/*
 * Add a MAC address.
 */
static int
ixgbe_addmac(void *arg, const uint8_t *mac_addr)
{
        ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
        ixgbe_t *ixgbe = rx_group->ixgbe;
        struct ixgbe_hw *hw = &ixgbe->hw;
        int slot, i;

        mutex_enter(&ixgbe->gen_lock);

        if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
                mutex_exit(&ixgbe->gen_lock);
                return (ECANCELED);
        }

        if (ixgbe->unicst_avail == 0) {
                /* no slots available */
                mutex_exit(&ixgbe->gen_lock);
                return (ENOSPC);
        }

        /*
         * The first ixgbe->num_rx_groups slots are reserved, one per
         * group; the remaining slots are shared by all groups. When
         * adding a MAC address, the group's reserved slot is checked
         * first, and the shared slots are searched only if it is
         * already in use.
         */
        slot = -1;
        if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
                for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
                        if (ixgbe->unicst_addr[i].mac.set == 0) {
                                slot = i;
                                break;
                        }
                }
        } else {
                slot = rx_group->index;
        }

        if (slot == -1) {
                /* no slots available */
                mutex_exit(&ixgbe->gen_lock);
                return (ENOSPC);
        }

        bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
        (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
            rx_group->index, IXGBE_RAH_AV);
        ixgbe->unicst_addr[slot].mac.set = 1;
        ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
        ixgbe->unicst_avail--;

        mutex_exit(&ixgbe->gen_lock);

        return (0);
}
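
/*
 * A worked example of the slot layout used by ixgbe_addmac() above,
 * with hypothetical numbers: assume num_rx_groups == 4 and
 * unicst_total == 128. Slots 0-3 are then the per-group reserved
 * slots and slots 4-127 form the shared pool:
 *
 *        slot:   0      1      2      3      4 ... 127
 *        use:    group0 group1 group2 group3 shared pool
 *
 * The first address added on group 1 occupies slot 1; a second
 * address on the same group falls through to the first free slot
 * at index >= 4.
 */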

/*
 * Remove a MAC address.
 */
static int
ixgbe_remmac(void *arg, const uint8_t *mac_addr)
{
        ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
        ixgbe_t *ixgbe = rx_group->ixgbe;
        struct ixgbe_hw *hw = &ixgbe->hw;
        int slot;

        mutex_enter(&ixgbe->gen_lock);

        if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
                mutex_exit(&ixgbe->gen_lock);
                return (ECANCELED);
        }

        slot = ixgbe_unicst_find(ixgbe, mac_addr);
        if (slot == -1) {
                mutex_exit(&ixgbe->gen_lock);
                return (EINVAL);
        }

        if (ixgbe->unicst_addr[slot].mac.set == 0) {
                mutex_exit(&ixgbe->gen_lock);
                return (EINVAL);
        }

        bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
        (void) ixgbe_clear_rar(hw, slot);
        ixgbe->unicst_addr[slot].mac.set = 0;
        ixgbe->unicst_avail++;

        mutex_exit(&ixgbe->gen_lock);

        return (0);
}
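
/*
 * For reference, ixgbe_remmac() relies on ixgbe_unicst_find() (defined
 * earlier in this file) to map an address back to its RAR slot. A
 * minimal sketch of that lookup, assuming a simple linear scan of the
 * software shadow of the RAR table:
 *
 *        for (slot = 0; slot < ixgbe->unicst_total; slot++) {
 *                if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
 *                    mac_addr, ETHERADDRL) == 0)
 *                        return (slot);
 *        }
 *        return (-1);
 */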