/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
 */

#include "ixgbe_sw.h"

static char ixgbe_ident[] = "Intel 10Gb Ethernet";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static int ixgbe_alloc_rx_data(ixgbe_t *);
static void ixgbe_free_rx_data(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_setup_vmdq(ixgbe_t *);
static void ixgbe_setup_vmdq_rss(ixgbe_t *);
static void ixgbe_setup_rss_table(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
static void ixgbe_get_conf(ixgbe_t *);
static void ixgbe_init_params(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static void ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_sfp_check(void *);
static void ixgbe_overtemp_check(void *);
static void ixgbe_phy_check(void *);
static void ixgbe_link_timer(void *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_msix(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static int ixgbe_addmac(void *, const uint8_t *);
static int ixgbe_remmac(void *, const uint8_t *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static int ixgbe_quiesce(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
static int ixgbe_intr_cb_register(ixgbe_t *);
static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

char *ixgbe_priv_props[] = {
        "_tx_copy_thresh",
        "_tx_recycle_thresh",
        "_tx_overload_thresh",
        "_tx_resched_thresh",
        "_rx_copy_thresh",
        "_rx_limit_per_intr",
        "_intr_throttling",
        "_adv_pause_cap",
        "_adv_asym_pause_cap",
        NULL
};

#define IXGBE_MAX_PRIV_PROPS \
        (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))

static struct cb_ops ixgbe_cb_ops = {
        nulldev,                /* cb_open */
        nulldev,                /* cb_close */
        nodev,                  /* cb_strategy */
        nodev,                  /* cb_print */
        nodev,                  /* cb_dump */
        nodev,                  /* cb_read */
        nodev,                  /* cb_write */
        nodev,                  /* cb_ioctl */
        nodev,                  /* cb_devmap */
        nodev,                  /* cb_mmap */
        nodev,                  /* cb_segmap */
        nochpoll,               /* cb_chpoll */
        ddi_prop_op,            /* cb_prop_op */
        NULL,                   /* cb_stream */
        D_MP | D_HOTPLUG,       /* cb_flag */
        CB_REV,                 /* cb_rev */
        nodev,                  /* cb_aread */
        nodev                   /* cb_awrite */
};

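/*
 * Note: every character-device entry point above is a nulldev/nodev
 * stub. This is deliberate: as a GLDv3 driver, ixgbe hands device-node
 * handling to the MAC framework via mac_init_ops() in _init() below,
 * and the framework supplies the real entry points on the driver's
 * behalf.
 */
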
static struct dev_ops ixgbe_dev_ops = {
        DEVO_REV,               /* devo_rev */
        0,                      /* devo_refcnt */
        NULL,                   /* devo_getinfo */
        nulldev,                /* devo_identify */
        nulldev,                /* devo_probe */
        ixgbe_attach,           /* devo_attach */
        ixgbe_detach,           /* devo_detach */
        nodev,                  /* devo_reset */
        &ixgbe_cb_ops,          /* devo_cb_ops */
        NULL,                   /* devo_bus_ops */
        ddi_power,              /* devo_power */
        ixgbe_quiesce,          /* devo_quiesce */
};

static struct modldrv ixgbe_modldrv = {
        &mod_driverops,         /* Type of module. This one is a driver */
        ixgbe_ident,            /* Description string */
        &ixgbe_dev_ops          /* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
        MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
        DDI_DEVICE_ATTR_V1,
        DDI_STRUCTURE_LE_ACC,
        DDI_STRICTORDER_ACC,
        DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
        normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
        internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
        external, "External", IXGBE_LB_EXTERNAL
};

#define IXGBE_M_CALLBACK_FLAGS \
        (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t ixgbe_m_callbacks = {
        IXGBE_M_CALLBACK_FLAGS,
        ixgbe_m_stat,
        ixgbe_m_start,
        ixgbe_m_stop,
        ixgbe_m_promisc,
        ixgbe_m_multicst,
        NULL,
        NULL,
        NULL,
        ixgbe_m_ioctl,
        ixgbe_m_getcapab,
        NULL,
        NULL,
        ixgbe_m_setprop,
        ixgbe_m_getprop,
        ixgbe_m_propinfo
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t ixgbe_82598eb_cap = {
        64,             /* maximum number of rx queues */
        1,              /* minimum number of rx queues */
        64,             /* default number of rx queues */
        16,             /* maximum number of rx groups */
        1,              /* minimum number of rx groups */
        1,              /* default number of rx groups */
        32,             /* maximum number of tx queues */
        1,              /* minimum number of tx queues */
        8,              /* default number of tx queues */
        16366,          /* maximum MTU size */
        0xFFFF,         /* maximum interrupt throttle rate */
        0,              /* minimum interrupt throttle rate */
        200,            /* default interrupt throttle rate */
        18,             /* maximum total msix vectors */
        16,             /* maximum number of ring vectors */
        2,              /* maximum number of other vectors */
        IXGBE_EICR_LSC, /* "other" interrupt types handled */
        0,              /* "other" interrupt types enable mask */
        (IXGBE_FLAG_DCA_CAPABLE /* capability flags */
        | IXGBE_FLAG_RSS_CAPABLE
        | IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
        128,            /* maximum number of rx queues */
        1,              /* minimum number of rx queues */
        128,            /* default number of rx queues */
        64,             /* maximum number of rx groups */
        1,              /* minimum number of rx groups */
        1,              /* default number of rx groups */
        128,            /* maximum number of tx queues */
        1,              /* minimum number of tx queues */
        8,              /* default number of tx queues */
        15500,          /* maximum MTU size */
        0xFF8,          /* maximum interrupt throttle rate */
        0,              /* minimum interrupt throttle rate */
        200,            /* default interrupt throttle rate */
        64,             /* maximum total msix vectors */
        16,             /* maximum number of ring vectors */
        2,              /* maximum number of other vectors */
        (IXGBE_EICR_LSC
        | IXGBE_EICR_GPI_SDP1
        | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */

        (IXGBE_SDP1_GPIEN
        | IXGBE_SDP2_GPIEN),    /* "other" interrupt types enable mask */

        (IXGBE_FLAG_DCA_CAPABLE
        | IXGBE_FLAG_RSS_CAPABLE
        | IXGBE_FLAG_VMDQ_CAPABLE
        | IXGBE_FLAG_RSC_CAPABLE
        | IXGBE_FLAG_SFP_PLUG_CAPABLE)  /* capability flags */
};

static adapter_info_t ixgbe_X540_cap = {
        128,            /* maximum number of rx queues */
        1,              /* minimum number of rx queues */
        128,            /* default number of rx queues */
        64,             /* maximum number of rx groups */
        1,              /* minimum number of rx groups */
        1,              /* default number of rx groups */
        128,            /* maximum number of tx queues */
        1,              /* minimum number of tx queues */
        8,              /* default number of tx queues */
        15500,          /* maximum MTU size */
        0xFF8,          /* maximum interrupt throttle rate */
        0,              /* minimum interrupt throttle rate */
        200,            /* default interrupt throttle rate */
        64,             /* maximum total msix vectors */
        16,             /* maximum number of ring vectors */
        2,              /* maximum number of other vectors */
        (IXGBE_EICR_LSC
        | IXGBE_EICR_GPI_SDP1_X540
        | IXGBE_EICR_GPI_SDP2_X540),    /* "other" interrupt types handled */

        (IXGBE_SDP1_GPIEN_X540
        | IXGBE_SDP2_GPIEN_X540),       /* "other" interrupt types enable mask */

        (IXGBE_FLAG_DCA_CAPABLE
        | IXGBE_FLAG_RSS_CAPABLE
        | IXGBE_FLAG_VMDQ_CAPABLE
        | IXGBE_FLAG_RSC_CAPABLE)       /* capability flags */
};

static adapter_info_t ixgbe_X550_cap = {
        128,            /* maximum number of rx queues */
        1,              /* minimum number of rx queues */
        128,            /* default number of rx queues */
        64,             /* maximum number of rx groups */
        1,              /* minimum number of rx groups */
        1,              /* default number of rx groups */
        128,            /* maximum number of tx queues */
        1,              /* minimum number of tx queues */
        8,              /* default number of tx queues */
        15500,          /* maximum MTU size */
        0xFF8,          /* maximum interrupt throttle rate */
        0,              /* minimum interrupt throttle rate */
        0x200,          /* default interrupt throttle rate */
        64,             /* maximum total msix vectors */
        16,             /* maximum number of ring vectors */
        2,              /* maximum number of other vectors */
        IXGBE_EICR_LSC, /* "other" interrupt types handled */
        0,              /* "other" interrupt types enable mask */
        (IXGBE_FLAG_RSS_CAPABLE
        | IXGBE_FLAG_VMDQ_CAPABLE
        | IXGBE_FLAG_RSC_CAPABLE)       /* capability flags */
};

/*
 * Module Initialization Functions.
 */

int
_init(void)
{
        int status;

        mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

        status = mod_install(&ixgbe_modlinkage);

        if (status != DDI_SUCCESS) {
                mac_fini_ops(&ixgbe_dev_ops);
        }

        return (status);
}

int
_fini(void)
{
        int status;

        status = mod_remove(&ixgbe_modlinkage);

        if (status == DDI_SUCCESS) {
                mac_fini_ops(&ixgbe_dev_ops);
        }

        return (status);
}

int
_info(struct modinfo *modinfop)
{
        int status;

        status = mod_info(&ixgbe_modlinkage, modinfop);

        return (status);
}

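/*
 * Ordering note for the module functions above: mac_init_ops() must be
 * called before mod_install() so the GLDv3 framework can prepare
 * ixgbe_dev_ops, and mac_fini_ops() must undo that preparation both on
 * a failed mod_install() and after a successful mod_remove().
 */
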
/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
        ixgbe_t *ixgbe;
        struct ixgbe_osdep *osdep;
        struct ixgbe_hw *hw;
        int instance;
        char taskqname[32];

        /*
         * Check the command and perform corresponding operations
         */
        switch (cmd) {
        default:
                return (DDI_FAILURE);

        case DDI_RESUME:
                return (ixgbe_resume(devinfo));

        case DDI_ATTACH:
                break;
        }

        /* Get the device instance */
        instance = ddi_get_instance(devinfo);

        /* Allocate memory for the instance data structure */
        ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

        ixgbe->dip = devinfo;
        ixgbe->instance = instance;

        hw = &ixgbe->hw;
        osdep = &ixgbe->osdep;
        hw->back = osdep;
        osdep->ixgbe = ixgbe;

        /* Attach the instance pointer to the dev_info data structure */
        ddi_set_driver_private(devinfo, ixgbe);

        /*
         * Initialize for FMA support
         */
        ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
            0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
            DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
        ixgbe_fm_init(ixgbe);
        ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

        /*
         * Map PCI config space registers
         */
        if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to map PCI configurations");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

        /*
         * Identify the chipset family
         */
        if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to identify hardware");
                goto attach_fail;
        }

        /*
         * Map device registers
         */
        if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to map device registers");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

        /*
         * Initialize driver parameters
         */
        ixgbe_init_properties(ixgbe);
        ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

        /*
         * Register interrupt callback
         */
        if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to register interrupt callback");
                goto attach_fail;
        }

        /*
         * Allocate interrupts
         */
        if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to allocate interrupts");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

        /*
         * Allocate rx/tx rings based on the ring numbers.
         * The actual numbers of rx/tx rings are decided by the number of
         * allocated interrupt vectors, so we should allocate the rings after
         * interrupts are allocated.
         */
        if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

        /*
         * Map rings to interrupt vectors
         */
        if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
                goto attach_fail;
        }

        /*
         * Add interrupt handlers
         */
        if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to add interrupt handlers");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

        /*
         * Create a taskq for sfp-change
         */
        (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
        if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
            1, TASKQ_DEFAULTPRI, 0)) == NULL) {
                ixgbe_error(ixgbe, "sfp_taskq create failed");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;

        /*
         * Create a taskq for over-temp
         */
        (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
        if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
            1, TASKQ_DEFAULTPRI, 0)) == NULL) {
                ixgbe_error(ixgbe, "overtemp_taskq create failed");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;

        /*
         * Create a taskq for processing external PHY interrupts
         */
        (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
        if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
            1, TASKQ_DEFAULTPRI, 0)) == NULL) {
                ixgbe_error(ixgbe, "phy_taskq create failed");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;

        /*
         * Initialize driver parameters
         */
        if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to initialize driver settings");
                goto attach_fail;
        }

        /*
         * Initialize mutexes for this device.
         * Do this before enabling the interrupt handler and
         * registering the softint, to avoid a situation where the
         * interrupt handler can try to use an uninitialized mutex.
         */
        ixgbe_init_locks(ixgbe);
        ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

        /*
         * Initialize chipset hardware
         */
        if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to initialize adapter");
                goto attach_fail;
        }
        ixgbe->link_check_complete = B_FALSE;
        ixgbe->link_check_hrtime = gethrtime() +
            (IXGBE_LINK_UP_TIME * 100000000ULL);
        ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

        if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
                goto attach_fail;
        }

        /*
         * Initialize adapter capabilities
         */
        ixgbe_init_params(ixgbe);

        /*
         * Initialize statistics
         */
        if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to initialize statistics");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

        /*
         * Register the driver to the MAC
         */
        if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to register MAC");
                goto attach_fail;
        }
        mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
        ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

        ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
            IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
        if (ixgbe->periodic_id == 0) {
                ixgbe_error(ixgbe, "Failed to add the link check timer");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;

        /*
         * Now that mutex locks are initialized, and the chip is also
         * initialized, enable interrupts.
         */
        if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
                goto attach_fail;
        }
        ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

        ixgbe_log(ixgbe, "%s", ixgbe_ident);
        atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);

        return (DDI_SUCCESS);

attach_fail:
        ixgbe_unconfigure(devinfo, ixgbe);
        return (DDI_FAILURE);
}

/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
        ixgbe_t *ixgbe;

        /*
         * Check detach command
         */
        switch (cmd) {
        default:
                return (DDI_FAILURE);

        case DDI_SUSPEND:
                return (ixgbe_suspend(devinfo));

        case DDI_DETACH:
                break;
        }

        /*
         * Get the pointer to the driver private data structure
         */
        ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
        if (ixgbe == NULL)
                return (DDI_FAILURE);

        /*
         * If the device is still running, it needs to be stopped first.
         * This check is necessary because under some specific circumstances,
         * the detach routine can be called without stopping the interface
         * first.
         */
        if (ixgbe->ixgbe_state & IXGBE_STARTED) {
                atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
                mutex_enter(&ixgbe->gen_lock);
                ixgbe_stop(ixgbe, B_TRUE);
                mutex_exit(&ixgbe->gen_lock);
                /* Disable and stop the watchdog timer */
                ixgbe_disable_watchdog_timer(ixgbe);
        }

        /*
         * Check if there are still rx buffers held by the upper layer.
         * If so, fail the detach.
         */
        if (!ixgbe_rx_drain(ixgbe))
                return (DDI_FAILURE);

        /*
         * Do the remaining unconfigure routines
         */
        ixgbe_unconfigure(devinfo, ixgbe);

        return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
ixgbe_quiesce(dev_info_t *devinfo)
{
        ixgbe_t *ixgbe;
        struct ixgbe_hw *hw;

        ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);

        if (ixgbe == NULL)
                return (DDI_FAILURE);

        hw = &ixgbe->hw;

        /*
         * Disable the adapter interrupts
         */
        ixgbe_disable_adapter_interrupts(ixgbe);

        /*
         * Tell firmware driver is no longer in control
         */
        ixgbe_release_driver_control(hw);

        /*
         * Reset the chipset
         */
        (void) ixgbe_reset_hw(hw);

        /*
         * Reset PHY
         */
        (void) ixgbe_reset_phy(hw);

        return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
        /*
         * Disable interrupt
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
                (void) ixgbe_disable_intrs(ixgbe);
        }

        /*
         * Remove the link check timer
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
                if (ixgbe->periodic_id != NULL) {
                        ddi_periodic_delete(ixgbe->periodic_id);
                        ixgbe->periodic_id = NULL;
                }
        }

        /*
         * Unregister MAC
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
                (void) mac_unregister(ixgbe->mac_hdl);
        }

        /*
         * Free statistics
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
                kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
        }

        /*
         * Remove interrupt handlers
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
                ixgbe_rem_intr_handlers(ixgbe);
        }

        /*
         * Remove taskq for sfp-status-change
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
                ddi_taskq_destroy(ixgbe->sfp_taskq);
        }

        /*
         * Remove taskq for over-temp
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
                ddi_taskq_destroy(ixgbe->overtemp_taskq);
        }

        /*
         * Remove taskq for external PHYs
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
                ddi_taskq_destroy(ixgbe->phy_taskq);
        }

        /*
         * Remove interrupts
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
                ixgbe_rem_intrs(ixgbe);
        }

        /*
         * Unregister interrupt callback handler
         */
        if (ixgbe->cb_hdl != NULL) {
                (void) ddi_cb_unregister(ixgbe->cb_hdl);
        }

        /*
         * Remove driver properties
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
                (void) ddi_prop_remove_all(devinfo);
        }

        /*
         * Stop the chipset
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
                mutex_enter(&ixgbe->gen_lock);
                ixgbe_chip_stop(ixgbe);
                mutex_exit(&ixgbe->gen_lock);
        }

        /*
         * Free register handle
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
                if (ixgbe->osdep.reg_handle != NULL)
                        ddi_regs_map_free(&ixgbe->osdep.reg_handle);
        }

        /*
         * Free PCI config handle
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
                if (ixgbe->osdep.cfg_handle != NULL)
                        pci_config_teardown(&ixgbe->osdep.cfg_handle);
        }

        /*
         * Free locks
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
                ixgbe_destroy_locks(ixgbe);
        }

        /*
         * Free the rx/tx rings
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
                ixgbe_free_rings(ixgbe);
        }

        /*
         * Unregister FMA capabilities
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
                ixgbe_fm_fini(ixgbe);
        }

        /*
         * Free the driver data structure
         */
        kmem_free(ixgbe, sizeof (ixgbe_t));

        ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
        struct ixgbe_hw *hw = &ixgbe->hw;
        mac_register_t *mac;
        int status;

        if ((mac = mac_alloc(MAC_VERSION)) == NULL)
                return (IXGBE_FAILURE);

        mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
        mac->m_driver = ixgbe;
        mac->m_dip = ixgbe->dip;
        mac->m_src_addr = hw->mac.addr;
        mac->m_callbacks = &ixgbe_m_callbacks;
        mac->m_min_sdu = 0;
        mac->m_max_sdu = ixgbe->default_mtu;
        mac->m_margin = VLAN_TAGSZ;
        mac->m_priv_props = ixgbe_priv_props;
        mac->m_v12n = MAC_VIRT_LEVEL1;

        status = mac_register(mac, &ixgbe->mac_hdl);

        mac_free(mac);

        return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}

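/*
 * ixgbe_register_mac() above follows the standard
 * mac_alloc()/mac_register()/mac_free() sequence: the mac_register_t is
 * only a staging structure, so it can be freed as soon as mac_register()
 * returns. The only thing retained is the handle in ixgbe->mac_hdl,
 * which later calls such as mac_link_update() and mac_unregister() use.
 */
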
/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
        struct ixgbe_hw *hw = &ixgbe->hw;
        struct ixgbe_osdep *osdep = &ixgbe->osdep;

        /*
         * Get the device id
         */
        hw->vendor_id =
            pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
        hw->device_id =
            pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
        hw->revision_id =
            pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
        hw->subsystem_device_id =
            pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
        hw->subsystem_vendor_id =
            pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

        /*
         * Set the mac type of the adapter based on the device id
         */
        if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
                return (IXGBE_FAILURE);
        }

        /*
         * Install adapter capabilities
         */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
                ixgbe->capab = &ixgbe_82598eb_cap;

                if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
                        ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
                        ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
                        ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
                }
                break;

        case ixgbe_mac_82599EB:
                IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
                ixgbe->capab = &ixgbe_82599eb_cap;

                if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
                        ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
                        ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
                        ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
                }
                break;

        case ixgbe_mac_X540:
                IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
                ixgbe->capab = &ixgbe_X540_cap;
                /*
                 * For now, X540 is all set in its capab structure.
                 * As other X540 variants show up, things can change here.
                 */
                break;

        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
                IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
                ixgbe->capab = &ixgbe_X550_cap;

                if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
                        ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;

                /*
                 * Link detection on X552 SFP+ and X552/X557-AT
                 */
                if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
                    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
                        ixgbe->capab->other_intr |=
                            IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
                        ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
                }
                break;

        default:
                IXGBE_DEBUGLOG_1(ixgbe,
                    "adapter not supported in ixgbe_identify_hardware(): %d\n",
                    hw->mac.type);
                return (IXGBE_FAILURE);
        }

        return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
        dev_info_t *devinfo = ixgbe->dip;
        struct ixgbe_hw *hw = &ixgbe->hw;
        struct ixgbe_osdep *osdep = &ixgbe->osdep;
        off_t mem_size;

        /*
         * First get the size of device registers to be mapped.
         */
        if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
            != DDI_SUCCESS) {
                return (IXGBE_FAILURE);
        }

        /*
         * Call ddi_regs_map_setup() to map registers
         */
        if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
            (caddr_t *)&hw->hw_addr, 0,
            mem_size, &ixgbe_regs_acc_attr,
            &osdep->reg_handle)) != DDI_SUCCESS) {
                return (IXGBE_FAILURE);
        }

        return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
        /*
         * Get conf file properties, including link settings,
         * jumbo frames, ring number, descriptor number, etc.
         */
        ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be set up during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
        struct ixgbe_hw *hw = &ixgbe->hw;
        dev_info_t *devinfo = ixgbe->dip;
        ixgbe_rx_ring_t *rx_ring;
        ixgbe_rx_group_t *rx_group;
        ixgbe_tx_ring_t *tx_ring;
        uint32_t rx_size;
        uint32_t tx_size;
        uint32_t ring_per_group;
        int i;

        /*
         * Initialize chipset specific hardware function pointers
         */
        if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
                return (IXGBE_FAILURE);
        }

        /*
         * Get the system page size
         */
        ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

        /*
         * Set rx buffer size
         *
         * The IP header alignment room is counted in the calculation.
         * The rx buffer size is in units of 1K, as required by the
         * chipset hardware.
         */
        rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
        ixgbe->rx_buf_size = ((rx_size >> 10) +
            ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

        /*
         * Set tx buffer size
         */
        tx_size = ixgbe->max_frame_size;
        ixgbe->tx_buf_size = ((tx_size >> 10) +
            ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

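        /*
         * Illustrative arithmetic for the round-up-to-1KB logic above
         * (the actual values of max_frame_size and IPHDR_ALIGN_ROOM come
         * from ixgbe_sw.h and the configured MTU): if max_frame_size were
         * 1518 and IPHDR_ALIGN_ROOM were 2, then rx_size = 1520,
         * (rx_size >> 10) = 1 with a nonzero remainder, and so
         * rx_buf_size = (1 + 1) << 10 = 2048 bytes.
         */
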
        /*
         * Initialize rx/tx rings/groups parameters
         */
        ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
        for (i = 0; i < ixgbe->num_rx_rings; i++) {
                rx_ring = &ixgbe->rx_rings[i];
                rx_ring->index = i;
                rx_ring->ixgbe = ixgbe;
                rx_ring->group_index = i / ring_per_group;
                rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
        }

        for (i = 0; i < ixgbe->num_rx_groups; i++) {
                rx_group = &ixgbe->rx_groups[i];
                rx_group->index = i;
                rx_group->ixgbe = ixgbe;
        }

        for (i = 0; i < ixgbe->num_tx_rings; i++) {
                tx_ring = &ixgbe->tx_rings[i];
                tx_ring->index = i;
                tx_ring->ixgbe = ixgbe;
                if (ixgbe->tx_head_wb_enable)
                        tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
                else
                        tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

                tx_ring->ring_size = ixgbe->tx_ring_size;
                tx_ring->free_list_size = ixgbe->tx_ring_size +
                    (ixgbe->tx_ring_size >> 1);
        }

        /*
         * Initialize values of interrupt throttling rate
         */
        for (i = 1; i < MAX_INTR_VECTOR; i++)
                ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

        /*
         * The initial link state should be "unknown"
         */
        ixgbe->link_state = LINK_STATE_UNKNOWN;

        return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
        ixgbe_rx_ring_t *rx_ring;
        ixgbe_tx_ring_t *tx_ring;
        int i;

        for (i = 0; i < ixgbe->num_rx_rings; i++) {
                rx_ring = &ixgbe->rx_rings[i];
                mutex_init(&rx_ring->rx_lock, NULL,
                    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        }

        for (i = 0; i < ixgbe->num_tx_rings; i++) {
                tx_ring = &ixgbe->tx_rings[i];
                mutex_init(&tx_ring->tx_lock, NULL,
                    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
                mutex_init(&tx_ring->recycle_lock, NULL,
                    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
                mutex_init(&tx_ring->tcb_head_lock, NULL,
                    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
                mutex_init(&tx_ring->tcb_tail_lock, NULL,
                    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        }

        mutex_init(&ixgbe->gen_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

        mutex_init(&ixgbe->watchdog_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}

/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
        ixgbe_rx_ring_t *rx_ring;
        ixgbe_tx_ring_t *tx_ring;
        int i;

        for (i = 0; i < ixgbe->num_rx_rings; i++) {
                rx_ring = &ixgbe->rx_rings[i];
                mutex_destroy(&rx_ring->rx_lock);
        }

        for (i = 0; i < ixgbe->num_tx_rings; i++) {
                tx_ring = &ixgbe->tx_rings[i];
                mutex_destroy(&tx_ring->tx_lock);
                mutex_destroy(&tx_ring->recycle_lock);
                mutex_destroy(&tx_ring->tcb_head_lock);
                mutex_destroy(&tx_ring->tcb_tail_lock);
        }

        mutex_destroy(&ixgbe->gen_lock);
        mutex_destroy(&ixgbe->watchdog_lock);
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
        ixgbe_t *ixgbe;
        int i;

        ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
        if (ixgbe == NULL)
                return (DDI_FAILURE);

        mutex_enter(&ixgbe->gen_lock);

        if (ixgbe->ixgbe_state & IXGBE_STARTED) {
                if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
                        mutex_exit(&ixgbe->gen_lock);
                        return (DDI_FAILURE);
                }

                /*
                 * Enable and start the watchdog timer
                 */
                ixgbe_enable_watchdog_timer(ixgbe);
        }

        atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

        if (ixgbe->ixgbe_state & IXGBE_STARTED) {
                for (i = 0; i < ixgbe->num_tx_rings; i++) {
                        mac_tx_ring_update(ixgbe->mac_hdl,
                            ixgbe->tx_rings[i].ring_handle);
                }
        }

        mutex_exit(&ixgbe->gen_lock);

        return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
        ixgbe_t *ixgbe;

        ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
        if (ixgbe == NULL)
                return (DDI_FAILURE);

        mutex_enter(&ixgbe->gen_lock);

        atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
        if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
                mutex_exit(&ixgbe->gen_lock);
                return (DDI_SUCCESS);
        }
        ixgbe_stop(ixgbe, B_FALSE);

        mutex_exit(&ixgbe->gen_lock);

        /*
         * Disable and stop the watchdog timer
         */
        ixgbe_disable_watchdog_timer(ixgbe);

        return (DDI_SUCCESS);
}

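/*
 * A note on the suspend/resume pair above: ixgbe_suspend() sets the
 * IXGBE_SUSPENDED flag under gen_lock before stopping the hardware, so
 * other entry points can observe the transition, while ixgbe_resume()
 * restarts the hardware and the watchdog timer only if the interface
 * still carries the IXGBE_STARTED flag from before the suspend.
 */
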
/*
 * ixgbe_init - Initialize the device.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
        struct ixgbe_hw *hw = &ixgbe->hw;
        u8 pbanum[IXGBE_PBANUM_LENGTH];
        int rv;

        mutex_enter(&ixgbe->gen_lock);

        /*
         * Configure/Initialize hardware
         */
        rv = ixgbe_init_hw(hw);
        if (rv != IXGBE_SUCCESS) {
                switch (rv) {

                /*
                 * The first three errors do not prevent us from progressing
                 * further, and are mainly advisory in nature. In the case of
                 * an SFP module not being present or not deemed supported by
                 * the common code, we advise the operator of this fact but
                 * carry on instead of failing hard, as SFPs can be inserted
                 * or replaced while the driver is running. In the case of an
                 * unknown error, we fail hard, logging the reason and
                 * emitting a FMA event.
                 */
                case IXGBE_ERR_EEPROM_VERSION:
                        ixgbe_error(ixgbe,
                            "This Intel 10Gb Ethernet device is pre-release and"
                            " contains outdated firmware. Please contact your"
                            " hardware vendor for a replacement.");
                        break;
                case IXGBE_ERR_SFP_NOT_PRESENT:
                        ixgbe_error(ixgbe,
                            "No SFP+ module detected on this interface. Please "
                            "install a supported SFP+ module for this "
                            "interface to become operational.");
                        break;
                case IXGBE_ERR_SFP_NOT_SUPPORTED:
                        ixgbe_error(ixgbe,
                            "Unsupported SFP+ module detected. Please replace "
                            "it with a supported SFP+ module per Intel "
                            "documentation, or bypass this check with "
                            "allow_unsupported_sfp=1 in ixgbe.conf.");
                        break;
                default:
                        ixgbe_error(ixgbe,
                            "Failed to initialize hardware. ixgbe_init_hw "
                            "returned %d", rv);
                        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
                        goto init_fail;
                }
        }

        /*
         * Need to init eeprom before validating the checksum.
         */
        if (ixgbe_init_eeprom_params(hw) < 0) {
                ixgbe_error(ixgbe,
                    "Unable to initialize the eeprom interface.");
                ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
                goto init_fail;
        }

        /*
         * NVM validation
         */
        if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
                /*
                 * Some PCI-E parts fail the first check due to
                 * the link being in sleep state. Call it again,
                 * if it fails a second time it's a real issue.
                 */
                if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
                        ixgbe_error(ixgbe,
                            "Invalid NVM checksum. Please contact "
                            "the vendor to update the NVM.");
                        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
                        goto init_fail;
                }
        }

        /*
         * Setup default flow control thresholds - enable/disable
         * & flow control type is controlled by ixgbe.conf
         */
        hw->fc.high_water[0] = DEFAULT_FCRTH;
        hw->fc.low_water[0] = DEFAULT_FCRTL;
        hw->fc.pause_time = DEFAULT_FCPAUSE;
        hw->fc.send_xon = B_TRUE;

        /*
         * Initialize flow control
         */
        (void) ixgbe_start_hw(hw);

        /*
         * Initialize link settings
         */
        (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

        /*
         * Initialize the chipset hardware
         */
        if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
                goto init_fail;
        }

        /*
         * Read identifying information and place in devinfo.
         */
        pbanum[0] = '\0';
        (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
        if (*pbanum != '\0') {
                (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
                    "printed-board-assembly", (char *)pbanum);
        }

        if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
                goto init_fail;
        }

        mutex_exit(&ixgbe->gen_lock);
        return (IXGBE_SUCCESS);

init_fail:
        /*
         * Reset PHY
         */
        (void) ixgbe_reset_phy(hw);

        mutex_exit(&ixgbe->gen_lock);
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
        return (IXGBE_FAILURE);
}

/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
        struct ixgbe_hw *hw = &ixgbe->hw;
        int i;

        ASSERT(mutex_owned(&ixgbe->gen_lock));

        /*
         * Get the mac address.
         * This function should handle the SPARC case correctly.
         */
        if (!ixgbe_find_mac_address(ixgbe)) {
                ixgbe_error(ixgbe, "Failed to get the mac address");
                return (IXGBE_FAILURE);
        }

        /*
         * Validate the mac address
         */
        (void) ixgbe_init_rx_addrs(hw);
        if (!is_valid_mac_addr(hw->mac.addr)) {
                ixgbe_error(ixgbe, "Invalid mac address");
                return (IXGBE_FAILURE);
        }

        /*
         * Re-enable relaxed ordering for performance. It is disabled
         * by default in the hardware init.
         */
        if (ixgbe->relax_order_enable == B_TRUE)
                ixgbe_enable_relaxed_ordering(hw);

        /*
         * Setup adapter interrupt vectors
         */
        ixgbe_setup_adapter_vector(ixgbe);

        /*
         * Initialize unicast addresses.
         */
        ixgbe_init_unicst(ixgbe);

        /*
         * Setup and initialize the mctable structures.
         */
        ixgbe_setup_multicst(ixgbe);

        /*
         * Set interrupt throttling rate
         */
        for (i = 0; i < ixgbe->intr_cnt; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
        }

        /*
         * Disable Wake-on-LAN
         */
        IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);

        /*
         * Some adapters offer Energy Efficient Ethernet (EEE) support.
         * Due to issues with EEE in e1000g/igb, we disable this by default
         * as a precautionary measure.
         *
         * Currently, the only known adapter which supports EEE in the ixgbe
         * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the
         * first revision of it, as well as any X550 with MAC type 6 (non-EM)
         */
        (void) ixgbe_setup_eee(hw, B_FALSE);

        /*
         * Turn on any present SFP Tx laser
         */
        ixgbe_enable_tx_laser(hw);

        /*
         * Power on the PHY
         */
        (void) ixgbe_set_phy_power(hw, B_TRUE);

        /*
         * Save the state of the PHY
         */
        ixgbe_get_hw_state(ixgbe);

        /*
         * Make sure driver has control
         */
        ixgbe_get_driver_control(hw);

        return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
        struct ixgbe_hw *hw = &ixgbe->hw;
        int rv;

        ASSERT(mutex_owned(&ixgbe->gen_lock));

        /*
         * Stop interrupt generation and disable Tx unit
         */
        hw->adapter_stopped = B_FALSE;
        (void) ixgbe_stop_adapter(hw);

        /*
         * Reset the chipset
         */
        (void) ixgbe_reset_hw(hw);

        /*
         * Reset PHY
         */
        (void) ixgbe_reset_phy(hw);

        /*
         * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting
         * the PHY while doing so. Else, just power down the PHY.
         */
        if (hw->phy.ops.enter_lplu != NULL) {
                hw->phy.reset_disable = B_TRUE;
                rv = hw->phy.ops.enter_lplu(hw);
                if (rv != IXGBE_SUCCESS)
                        ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
                hw->phy.reset_disable = B_FALSE;
        } else {
                (void) ixgbe_set_phy_power(hw, B_FALSE);
        }

        /*
         * Turn off any present SFP Tx laser.
         * Expected for health and safety reasons.
         */
        ixgbe_disable_tx_laser(hw);

        /*
         * Tell firmware driver is no longer in control
         */
        ixgbe_release_driver_control(hw);
}

/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
        int i;

        /*
         * Disable and stop the watchdog timer
         */
        ixgbe_disable_watchdog_timer(ixgbe);

        mutex_enter(&ixgbe->gen_lock);

        ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
        atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

        ixgbe_stop(ixgbe, B_FALSE);

        if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
                mutex_exit(&ixgbe->gen_lock);
                return (IXGBE_FAILURE);
        }

        /*
         * After resetting, need to recheck the link status.
         */
        ixgbe->link_check_complete = B_FALSE;
        ixgbe->link_check_hrtime = gethrtime() +
            (IXGBE_LINK_UP_TIME * 100000000ULL);

        atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

        if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
                for (i = 0; i < ixgbe->num_tx_rings; i++) {
                        mac_tx_ring_update(ixgbe->mac_hdl,
                            ixgbe->tx_rings[i].ring_handle);
                }
        }

        mutex_exit(&ixgbe->gen_lock);

        /*
         * Enable and start the watchdog timer
         */
        ixgbe_enable_watchdog_timer(ixgbe);

        return (IXGBE_SUCCESS);
}

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
        ixgbe_tx_ring_t *tx_ring;
        tx_control_block_t *tcb;
        link_list_t pending_list;
        uint32_t desc_num;
        int i, j;

        LINK_LIST_INIT(&pending_list);

        for (i = 0; i < ixgbe->num_tx_rings; i++) {
                tx_ring = &ixgbe->tx_rings[i];

                mutex_enter(&tx_ring->recycle_lock);

                /*
                 * Clean the pending tx data - the pending packets in the
                 * work_list that have no chances to be transmitted again.
                 *
                 * We must ensure the chipset is stopped or the link is down
                 * before cleaning the transmit packets.
                 */
                desc_num = 0;
                for (j = 0; j < tx_ring->ring_size; j++) {
                        tcb = tx_ring->work_list[j];
                        if (tcb != NULL) {
                                desc_num += tcb->desc_num;

                                tx_ring->work_list[j] = NULL;

                                ixgbe_free_tcb(tcb);

                                LIST_PUSH_TAIL(&pending_list, &tcb->link);
                        }
                }

                if (desc_num > 0) {
                        atomic_add_32(&tx_ring->tbd_free, desc_num);
                        ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

                        /*
                         * Reset the head and tail pointers of the tbd ring;
                         * Reset the writeback head if it's enabled.
                         */
                        tx_ring->tbd_head = 0;
                        tx_ring->tbd_tail = 0;
                        if (ixgbe->tx_head_wb_enable)
                                *tx_ring->tbd_head_wb = 0;

                        IXGBE_WRITE_REG(&ixgbe->hw,
                            IXGBE_TDH(tx_ring->index), 0);
                        IXGBE_WRITE_REG(&ixgbe->hw,
                            IXGBE_TDT(tx_ring->index), 0);
                }

                mutex_exit(&tx_ring->recycle_lock);

                /*
                 * Add the tx control blocks in the pending list to
                 * the free list.
                 */
                ixgbe_put_free_list(tx_ring, &pending_list);
        }
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
        ixgbe_tx_ring_t *tx_ring;
        boolean_t done;
        int i, j;

        /*
         * Wait for a specific time to allow pending tx packets
         * to be transmitted.
         *
         * Check the counter tbd_free to see if transmission is done.
         * No lock protection is needed here.
         *
         * Return B_TRUE if all pending packets have been transmitted;
         * Otherwise return B_FALSE;
         */
        for (i = 0; i < TX_DRAIN_TIME; i++) {

                done = B_TRUE;
                for (j = 0; j < ixgbe->num_tx_rings; j++) {
                        tx_ring = &ixgbe->tx_rings[j];
                        done = done &&
                            (tx_ring->tbd_free == tx_ring->ring_size);
                }

                if (done)
                        break;

                msec_delay(1);
        }

        return (done);
}

/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
        boolean_t done = B_TRUE;
        int i;

        /*
         * Poll the rx free list to check whether the rx buffers held by
         * the upper layer have been released.
         *
         * Check the counter rcb_free to see if all pending buffers are
         * released. No lock protection is needed here.
         *
         * Return B_TRUE if all pending buffers have been released;
         * Otherwise return B_FALSE;
         */
        for (i = 0; i < RX_DRAIN_TIME; i++) {
                done = (ixgbe->rcb_pending == 0);

                if (done)
                        break;

                msec_delay(1);
        }

        return (done);
}

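/*
 * Lock-ordering note for ixgbe_start() and ixgbe_stop() below: both
 * take every rx ring lock and then every tx ring lock in ascending
 * index order, and release them in the reverse order. Keeping this
 * acquisition order consistent is what lets the start/stop paths run
 * safely alongside the per-ring interrupt handlers without deadlock.
 */
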
/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
        struct ixgbe_hw *hw = &ixgbe->hw;
        int i;

        ASSERT(mutex_owned(&ixgbe->gen_lock));

        if (alloc_buffer) {
                if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
                        ixgbe_error(ixgbe,
                            "Failed to allocate software receive rings");
                        return (IXGBE_FAILURE);
                }

                /* Allocate buffers for all the rx/tx rings */
                if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
                        ixgbe_error(ixgbe, "Failed to allocate DMA resource");
                        return (IXGBE_FAILURE);
                }

                ixgbe->tx_ring_init = B_TRUE;
        } else {
                ixgbe->tx_ring_init = B_FALSE;
        }

        for (i = 0; i < ixgbe->num_rx_rings; i++)
                mutex_enter(&ixgbe->rx_rings[i].rx_lock);
        for (i = 0; i < ixgbe->num_tx_rings; i++)
                mutex_enter(&ixgbe->tx_rings[i].tx_lock);

        /*
         * Start the chipset hardware
         */
        if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
                ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
                goto start_failure;
        }

        /*
         * Configure link now for X550
         *
         * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
         * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
         * the resting state of the link would be the maximum speed that
         * autonegotiation will allow (usually 10Gb, infrastructure allowing)
         * so we never bothered with explicitly setting the link to 10Gb as it
         * would already be at that state on driver attach. With X550, we must
         * trigger a re-negotiation of the link in order to switch from a LPLU
         * 1Gb link to 10Gb (cable and link partner permitting.)
         */
        if (hw->mac.type == ixgbe_mac_X550 ||
            hw->mac.type == ixgbe_mac_X550EM_x) {
                (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
                ixgbe_get_hw_state(ixgbe);
        }

        if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
                goto start_failure;
        }

        /*
         * Setup the rx/tx rings
         */
        ixgbe_setup_rings(ixgbe);

        /*
         * ixgbe_start() will be called when resetting, however if reset
         * happens, we need to clear the ERROR, STALL and OVERTEMP flags
         * before enabling the interrupts.
         */
        atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
            | IXGBE_STALL | IXGBE_OVERTEMP));

        /*
         * Enable adapter interrupts.
         * The interrupts must be enabled after the driver state is STARTED.
         */
        ixgbe_enable_adapter_interrupts(ixgbe);

        for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
                mutex_exit(&ixgbe->tx_rings[i].tx_lock);
        for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
                mutex_exit(&ixgbe->rx_rings[i].rx_lock);

        return (IXGBE_SUCCESS);

start_failure:
        for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
                mutex_exit(&ixgbe->tx_rings[i].tx_lock);
        for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
                mutex_exit(&ixgbe->rx_rings[i].rx_lock);

        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

        return (IXGBE_FAILURE);
}

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
        int i;

        ASSERT(mutex_owned(&ixgbe->gen_lock));

        /*
         * Disable the adapter interrupts
         */
        ixgbe_disable_adapter_interrupts(ixgbe);

        /*
         * Drain the pending tx packets
         */
        (void) ixgbe_tx_drain(ixgbe);

        for (i = 0; i < ixgbe->num_rx_rings; i++)
                mutex_enter(&ixgbe->rx_rings[i].rx_lock);
        for (i = 0; i < ixgbe->num_tx_rings; i++)
                mutex_enter(&ixgbe->tx_rings[i].tx_lock);

        /*
         * Stop the chipset hardware
         */
        ixgbe_chip_stop(ixgbe);

        if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
                ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
        }

        /*
         * Clean the pending tx data/resources
         */
        ixgbe_tx_clean(ixgbe);

        for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
                mutex_exit(&ixgbe->tx_rings[i].tx_lock);
        for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
                mutex_exit(&ixgbe->rx_rings[i].rx_lock);

        if (ixgbe->link_state == LINK_STATE_UP) {
                ixgbe->link_state = LINK_STATE_UNKNOWN;
                mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
        }

        if (free_buffer) {
                /*
                 * Release the DMA/memory resources of rx/tx rings
                 */
                ixgbe_free_dma(ixgbe);
                ixgbe_free_rx_data(ixgbe);
        }
}

/*
 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
 */
/* ARGSUSED */
static int
ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
    void *arg1, void *arg2)
{
        ixgbe_t *ixgbe = (ixgbe_t *)arg1;

        switch (cbaction) {
        /* IRM callback */
        int count;
        case DDI_CB_INTR_ADD:
        case DDI_CB_INTR_REMOVE:
                count = (int)(uintptr_t)cbarg;
                ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
                DTRACE_PROBE2(ixgbe__irm__callback, int, count,
                    int, ixgbe->intr_cnt);
                if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
                    DDI_SUCCESS) {
                        ixgbe_error(ixgbe,
                            "IRM CB: Failed to adjust interrupts");
                        goto cb_fail;
                }
                break;
        default:
                IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
                    cbaction);
                return (DDI_ENOTSUP);
        }
        return (DDI_SUCCESS);
cb_fail:
        return (DDI_FAILURE);
}

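/*
 * For the IRM callback above, cbarg carries the number of interrupt
 * vectors the framework wants this instance to add or release. The
 * DTRACE_PROBE2() site is visible through the sdt provider; an
 * illustrative (unverified) one-liner to watch it would be:
 *
 *      dtrace -n 'sdt:::ixgbe-irm-callback { trace(arg0); }'
 */
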
/*
 * ixgbe_intr_adjust - Adjust interrupts to respond to an IRM request.
 */
static int
ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
{
        int i, rc, actual;

        if (count == 0)
                return (DDI_SUCCESS);

        if ((cbaction == DDI_CB_INTR_ADD &&
            ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
            (cbaction == DDI_CB_INTR_REMOVE &&
            ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
                return (DDI_FAILURE);

        if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
                return (DDI_FAILURE);
        }

        for (i = 0; i < ixgbe->num_rx_rings; i++)
                mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
        for (i = 0; i < ixgbe->num_tx_rings; i++)
                mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);

        mutex_enter(&ixgbe->gen_lock);
        ixgbe->ixgbe_state &= ~IXGBE_STARTED;
        ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
        ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
        mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);

        ixgbe_stop(ixgbe, B_FALSE);
        /*
         * Disable interrupts
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
                rc = ixgbe_disable_intrs(ixgbe);
                ASSERT(rc == IXGBE_SUCCESS);
        }
        ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;

        /*
         * Remove interrupt handlers
         */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
                ixgbe_rem_intr_handlers(ixgbe);
        }
        ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;

        /*
         * Clear vect_map
         */
        bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
        switch (cbaction) {
        case DDI_CB_INTR_ADD:
                rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
                    DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
                    DDI_INTR_ALLOC_NORMAL);
                if (rc != DDI_SUCCESS || actual != count) {
                        ixgbe_log(ixgbe, "Adjust interrupts failed."
                            "return: %d, irm cb size: %d, actual: %d",
                            rc, count, actual);
                        goto intr_adjust_fail;
                }
                ixgbe->intr_cnt += count;
                break;

        case DDI_CB_INTR_REMOVE:
                for (i = ixgbe->intr_cnt - count;
                    i < ixgbe->intr_cnt; i++) {
                        rc = ddi_intr_free(ixgbe->htable[i]);
                        ixgbe->htable[i] = NULL;
                        if (rc != DDI_SUCCESS) {
                                ixgbe_log(ixgbe, "Adjust interrupts failed."
2063 "return: %d, irm cb size: %d, actual: %d", 2064 rc, count, actual); 2065 goto intr_adjust_fail; 2066 } 2067 } 2068 ixgbe->intr_cnt -= count; 2069 break; 2070 } 2071 2072 /* 2073 * Get priority for first vector, assume remaining are all the same 2074 */ 2075 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 2076 if (rc != DDI_SUCCESS) { 2077 ixgbe_log(ixgbe, 2078 "Get interrupt priority failed: %d", rc); 2079 goto intr_adjust_fail; 2080 } 2081 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 2082 if (rc != DDI_SUCCESS) { 2083 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc); 2084 goto intr_adjust_fail; 2085 } 2086 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 2087 2088 /* 2089 * Map rings to interrupt vectors 2090 */ 2091 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 2092 ixgbe_error(ixgbe, 2093 "IRM CB: Failed to map interrupts to vectors"); 2094 goto intr_adjust_fail; 2095 } 2096 2097 /* 2098 * Add interrupt handlers 2099 */ 2100 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 2101 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers"); 2102 goto intr_adjust_fail; 2103 } 2104 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 2105 2106 /* 2107 * Now that mutex locks are initialized, and the chip is also 2108 * initialized, enable interrupts. 2109 */ 2110 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 2111 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts"); 2112 goto intr_adjust_fail; 2113 } 2114 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 2115 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 2116 ixgbe_error(ixgbe, "IRM CB: Failed to start"); 2117 goto intr_adjust_fail; 2118 } 2119 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST; 2120 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; 2121 ixgbe->ixgbe_state |= IXGBE_STARTED; 2122 mutex_exit(&ixgbe->gen_lock); 2123 2124 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2125 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, 2126 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]); 2127 } 2128 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2129 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, 2130 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]); 2131 } 2132 2133 /* Wakeup all Tx rings */ 2134 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2135 mac_tx_ring_update(ixgbe->mac_hdl, 2136 ixgbe->tx_rings[i].ring_handle); 2137 } 2138 2139 IXGBE_DEBUGLOG_3(ixgbe, 2140 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).", 2141 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max); 2142 return (DDI_SUCCESS); 2143 2144 intr_adjust_fail: 2145 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 2146 mutex_exit(&ixgbe->gen_lock); 2147 return (DDI_FAILURE); 2148 } 2149 2150 /* 2151 * ixgbe_intr_cb_register - Register interrupt callback function. 2152 */ 2153 static int 2154 ixgbe_intr_cb_register(ixgbe_t *ixgbe) 2155 { 2156 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc, 2157 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) { 2158 return (IXGBE_FAILURE); 2159 } 2160 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered."); 2161 return (IXGBE_SUCCESS); 2162 } 2163 2164 /* 2165 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings. 
2166 */ 2167 static int 2168 ixgbe_alloc_rings(ixgbe_t *ixgbe) 2169 { 2170 /* 2171 * Allocate memory space for rx rings 2172 */ 2173 ixgbe->rx_rings = kmem_zalloc( 2174 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, 2175 KM_NOSLEEP); 2176 2177 if (ixgbe->rx_rings == NULL) { 2178 return (IXGBE_FAILURE); 2179 } 2180 2181 /* 2182 * Allocate memory space for tx rings 2183 */ 2184 ixgbe->tx_rings = kmem_zalloc( 2185 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, 2186 KM_NOSLEEP); 2187 2188 if (ixgbe->tx_rings == NULL) { 2189 kmem_free(ixgbe->rx_rings, 2190 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2191 ixgbe->rx_rings = NULL; 2192 return (IXGBE_FAILURE); 2193 } 2194 2195 /* 2196 * Allocate memory space for rx ring groups 2197 */ 2198 ixgbe->rx_groups = kmem_zalloc( 2199 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, 2200 KM_NOSLEEP); 2201 2202 if (ixgbe->rx_groups == NULL) { 2203 kmem_free(ixgbe->rx_rings, 2204 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2205 kmem_free(ixgbe->tx_rings, 2206 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2207 ixgbe->rx_rings = NULL; 2208 ixgbe->tx_rings = NULL; 2209 return (IXGBE_FAILURE); 2210 } 2211 2212 return (IXGBE_SUCCESS); 2213 } 2214 2215 /* 2216 * ixgbe_free_rings - Free the memory space of rx/tx rings. 2217 */ 2218 static void 2219 ixgbe_free_rings(ixgbe_t *ixgbe) 2220 { 2221 if (ixgbe->rx_rings != NULL) { 2222 kmem_free(ixgbe->rx_rings, 2223 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2224 ixgbe->rx_rings = NULL; 2225 } 2226 2227 if (ixgbe->tx_rings != NULL) { 2228 kmem_free(ixgbe->tx_rings, 2229 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2230 ixgbe->tx_rings = NULL; 2231 } 2232 2233 if (ixgbe->rx_groups != NULL) { 2234 kmem_free(ixgbe->rx_groups, 2235 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); 2236 ixgbe->rx_groups = NULL; 2237 } 2238 } 2239 2240 static int 2241 ixgbe_alloc_rx_data(ixgbe_t *ixgbe) 2242 { 2243 ixgbe_rx_ring_t *rx_ring; 2244 int i; 2245 2246 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2247 rx_ring = &ixgbe->rx_rings[i]; 2248 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) 2249 goto alloc_rx_rings_failure; 2250 } 2251 return (IXGBE_SUCCESS); 2252 2253 alloc_rx_rings_failure: 2254 ixgbe_free_rx_data(ixgbe); 2255 return (IXGBE_FAILURE); 2256 } 2257 2258 static void 2259 ixgbe_free_rx_data(ixgbe_t *ixgbe) 2260 { 2261 ixgbe_rx_ring_t *rx_ring; 2262 ixgbe_rx_data_t *rx_data; 2263 int i; 2264 2265 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2266 rx_ring = &ixgbe->rx_rings[i]; 2267 2268 mutex_enter(&ixgbe->rx_pending_lock); 2269 rx_data = rx_ring->rx_data; 2270 2271 if (rx_data != NULL) { 2272 rx_data->flag |= IXGBE_RX_STOPPED; 2273 2274 if (rx_data->rcb_pending == 0) { 2275 ixgbe_free_rx_ring_data(rx_data); 2276 rx_ring->rx_data = NULL; 2277 } 2278 } 2279 2280 mutex_exit(&ixgbe->rx_pending_lock); 2281 } 2282 } 2283 2284 /* 2285 * ixgbe_setup_rings - Setup rx/tx rings. 2286 */ 2287 static void 2288 ixgbe_setup_rings(ixgbe_t *ixgbe) 2289 { 2290 /* 2291 * Setup the rx/tx rings, including the following: 2292 * 2293 * 1. Setup the descriptor ring and the control block buffers; 2294 * 2. Initialize necessary registers for receive/transmit; 2295 * 3. 
Initialize software pointers/parameters for receive/transmit; 2296 */ 2297 ixgbe_setup_rx(ixgbe); 2298 2299 ixgbe_setup_tx(ixgbe); 2300 } 2301 2302 static void 2303 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 2304 { 2305 ixgbe_t *ixgbe = rx_ring->ixgbe; 2306 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2307 struct ixgbe_hw *hw = &ixgbe->hw; 2308 rx_control_block_t *rcb; 2309 union ixgbe_adv_rx_desc *rbd; 2310 uint32_t size; 2311 uint32_t buf_low; 2312 uint32_t buf_high; 2313 uint32_t reg_val; 2314 int i; 2315 2316 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2317 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2318 2319 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2320 rcb = rx_data->work_list[i]; 2321 rbd = &rx_data->rbd_ring[i]; 2322 2323 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2324 rbd->read.hdr_addr = NULL; 2325 } 2326 2327 /* 2328 * Initialize the length register 2329 */ 2330 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2331 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2332 2333 /* 2334 * Initialize the base address registers 2335 */ 2336 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2337 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2338 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2339 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2340 2341 /* 2342 * Setup head & tail pointers 2343 */ 2344 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2345 rx_data->ring_size - 1); 2346 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2347 2348 rx_data->rbd_next = 0; 2349 rx_data->lro_first = 0; 2350 2351 /* 2352 * Setup the Receive Descriptor Control Register (RXDCTL) 2353 * PTHRESH=32 descriptors (half the internal cache) 2354 * HTHRESH=0 descriptors (to minimize latency on fetch) 2355 * WTHRESH defaults to 1 (writeback each descriptor) 2356 */ 2357 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2358 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2359 2360 /* Not a valid value for 82599, X540 or X550 */ 2361 if (hw->mac.type == ixgbe_mac_82598EB) { 2362 reg_val |= 0x0020; /* pthresh */ 2363 } 2364 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2365 2366 if (hw->mac.type == ixgbe_mac_82599EB || 2367 hw->mac.type == ixgbe_mac_X540 || 2368 hw->mac.type == ixgbe_mac_X550 || 2369 hw->mac.type == ixgbe_mac_X550EM_x) { 2370 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2371 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2372 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2373 } 2374 2375 /* 2376 * Setup the Split and Replication Receive Control Register. 2377 * Set the rx buffer size and the advanced descriptor type. 
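 *
 * The buffer size field of SRRCTL is expressed in 1 KB units (hence the
 * BSIZEPKT shift); e.g. a 2 KB rx buffer would be programmed as
 * 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT = 2.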
2378 */ 2379 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) | 2380 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2381 reg_val |= IXGBE_SRRCTL_DROP_EN; 2382 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val); 2383 } 2384 2385 static void 2386 ixgbe_setup_rx(ixgbe_t *ixgbe) 2387 { 2388 ixgbe_rx_ring_t *rx_ring; 2389 struct ixgbe_hw *hw = &ixgbe->hw; 2390 uint32_t reg_val; 2391 uint32_t ring_mapping; 2392 uint32_t i, index; 2393 uint32_t psrtype_rss_bit; 2394 2395 /* 2396 * Ensure that Rx is disabled while setting up 2397 * the Rx unit and Rx descriptor ring(s) 2398 */ 2399 ixgbe_disable_rx(hw); 2400 2401 /* PSRTYPE must be configured for 82599 */ 2402 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ && 2403 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) { 2404 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2405 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2406 reg_val |= IXGBE_PSRTYPE_L2HDR; 2407 reg_val |= 0x80000000; 2408 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val); 2409 } else { 2410 if (ixgbe->num_rx_groups > 32) { 2411 psrtype_rss_bit = 0x20000000; 2412 } else { 2413 psrtype_rss_bit = 0x40000000; 2414 } 2415 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) { 2416 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2417 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2418 reg_val |= IXGBE_PSRTYPE_L2HDR; 2419 reg_val |= psrtype_rss_bit; 2420 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val); 2421 } 2422 } 2423 2424 /* 2425 * Set filter control in FCTRL to determine which types of packets are 2426 * passed up to the driver. 2427 * - Pass broadcast packets. 2428 * - Do not pass flow control pause frames (82598-specific) 2429 */ 2430 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2431 reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */ 2432 if (hw->mac.type == ixgbe_mac_82598EB) { 2433 reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */ 2434 } 2435 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val); 2436 2437 /* 2438 * Hardware checksum settings 2439 */ 2440 if (ixgbe->rx_hcksum_enable) { 2441 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2442 reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */ 2443 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val); 2444 } 2445 2446 /* 2447 * Setup VMDq and RSS for multiple receive queues 2448 */ 2449 switch (ixgbe->classify_mode) { 2450 case IXGBE_CLASSIFY_RSS: 2451 /* 2452 * One group, only RSS is needed when more than 2453 * one ring is enabled. 2454 */ 2455 ixgbe_setup_rss(ixgbe); 2456 break; 2457 2458 case IXGBE_CLASSIFY_VMDQ: 2459 /* 2460 * Multiple groups, each group has one ring, 2461 * only VMDq is needed. 2462 */ 2463 ixgbe_setup_vmdq(ixgbe); 2464 break; 2465 2466 case IXGBE_CLASSIFY_VMDQ_RSS: 2467 /* 2468 * Multiple groups and multiple rings, both 2469 * VMDq and RSS are needed. 2470 */ 2471 ixgbe_setup_vmdq_rss(ixgbe); 2472 break; 2473 2474 default: 2475 break; 2476 } 2477 2478 /* 2479 * Enable the receive unit. This must be done after filter 2480 * control is set in FCTRL. On 82598, we disable the descriptor monitor. 2481 * 82598 is the only adapter which defines this RXCTRL option.
2482 */ 2483 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2484 if (hw->mac.type == ixgbe_mac_82598EB) 2485 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */ 2486 reg_val |= IXGBE_RXCTRL_RXEN; 2487 (void) ixgbe_enable_rx_dma(hw, reg_val); 2488 2489 /* 2490 * ixgbe_setup_rx_ring must be called after configuring RXCTRL 2491 */ 2492 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2493 rx_ring = &ixgbe->rx_rings[i]; 2494 ixgbe_setup_rx_ring(rx_ring); 2495 } 2496 2497 /* 2498 * Setup the per-ring statistics mapping. 2499 */ 2500 ring_mapping = 0; 2501 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2502 index = ixgbe->rx_rings[i].hw_index; 2503 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2)); 2504 ring_mapping |= (i & 0xF) << (8 * (index & 0x3)); 2505 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping); 2506 } 2507 2508 /* 2509 * The Max Frame Size in MHADD/MAXFRS will be internally increased 2510 * by four bytes if the packet has a VLAN field, so it includes the 2511 * MTU, Ethernet header and frame check sequence. 2512 * Register is MAXFRS in 82599. 2513 */ 2514 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD); 2515 reg_val &= ~IXGBE_MHADD_MFS_MASK; 2516 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header) 2517 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT; 2518 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val); 2519 2520 /* 2521 * Setup Jumbo Frame enable bit 2522 */ 2523 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2524 if (ixgbe->default_mtu > ETHERMTU) 2525 reg_val |= IXGBE_HLREG0_JUMBOEN; 2526 else 2527 reg_val &= ~IXGBE_HLREG0_JUMBOEN; 2528 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2529 2530 /* 2531 * Setup RSC for multiple receive queues. 2532 */ 2533 if (ixgbe->lro_enable) { 2534 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2535 /* 2536 * Make sure rx_buf_size * MAXDESC is not greater 2537 * than 65535. 2538 * Intel recommends 4 for MAXDESC field value.
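 *
 * Worked example: with 16 KB buffers, 16384 * 4 = 65536 would exceed
 * 65535, so MAXDESC is dropped to 1 below; any smaller buffer size keeps
 * the recommended value of 4 (e.g. 8192 * 4 = 32768 is fine).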
2539 */ 2540 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i)); 2541 reg_val |= IXGBE_RSCCTL_RSCEN; 2542 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k) 2543 reg_val |= IXGBE_RSCCTL_MAXDESC_1; 2544 else 2545 reg_val |= IXGBE_RSCCTL_MAXDESC_4; 2546 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val); 2547 } 2548 2549 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU); 2550 reg_val |= IXGBE_RSCDBU_RSCACKDIS; 2551 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val); 2552 2553 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2554 reg_val |= IXGBE_RDRXCTL_RSCACKC; 2555 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX; 2556 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2557 2558 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2559 } 2560 } 2561 2562 static void 2563 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring) 2564 { 2565 ixgbe_t *ixgbe = tx_ring->ixgbe; 2566 struct ixgbe_hw *hw = &ixgbe->hw; 2567 uint32_t size; 2568 uint32_t buf_low; 2569 uint32_t buf_high; 2570 uint32_t reg_val; 2571 2572 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2573 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2574 2575 /* 2576 * Initialize the length register 2577 */ 2578 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc); 2579 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size); 2580 2581 /* 2582 * Initialize the base address registers 2583 */ 2584 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2585 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2586 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low); 2587 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high); 2588 2589 /* 2590 * Setup head & tail pointers 2591 */ 2592 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0); 2593 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0); 2594 2595 /* 2596 * Setup head write-back 2597 */ 2598 if (ixgbe->tx_head_wb_enable) { 2599 /* 2600 * The memory of the head write-back is allocated using 2601 * the extra tbd beyond the tail of the tbd ring. 2602 */ 2603 tx_ring->tbd_head_wb = (uint32_t *) 2604 ((uintptr_t)tx_ring->tbd_area.address + size); 2605 *tx_ring->tbd_head_wb = 0; 2606 2607 buf_low = (uint32_t) 2608 (tx_ring->tbd_area.dma_address + size); 2609 buf_high = (uint32_t) 2610 ((tx_ring->tbd_area.dma_address + size) >> 32); 2611 2612 /* Set the head write-back enable bit */ 2613 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2614 2615 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2616 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2617 2618 /* 2619 * Turn off relaxed ordering for head write back or it will 2620 * cause problems with the tx recycling 2621 */ 2622 2623 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ? 
2624 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) : 2625 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index)); 2626 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 2627 if (hw->mac.type == ixgbe_mac_82598EB) { 2628 IXGBE_WRITE_REG(hw, 2629 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val); 2630 } else { 2631 IXGBE_WRITE_REG(hw, 2632 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val); 2633 } 2634 } else { 2635 tx_ring->tbd_head_wb = NULL; 2636 } 2637 2638 tx_ring->tbd_head = 0; 2639 tx_ring->tbd_tail = 0; 2640 tx_ring->tbd_free = tx_ring->ring_size; 2641 2642 if (ixgbe->tx_ring_init == B_TRUE) { 2643 tx_ring->tcb_head = 0; 2644 tx_ring->tcb_tail = 0; 2645 tx_ring->tcb_free = tx_ring->free_list_size; 2646 } 2647 2648 /* 2649 * Initialize the s/w context structure 2650 */ 2651 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t)); 2652 } 2653 2654 static void 2655 ixgbe_setup_tx(ixgbe_t *ixgbe) 2656 { 2657 struct ixgbe_hw *hw = &ixgbe->hw; 2658 ixgbe_tx_ring_t *tx_ring; 2659 uint32_t reg_val; 2660 uint32_t ring_mapping; 2661 int i; 2662 2663 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2664 tx_ring = &ixgbe->tx_rings[i]; 2665 ixgbe_setup_tx_ring(tx_ring); 2666 } 2667 2668 /* 2669 * Setup the per-ring statistics mapping. 2670 */ 2671 ring_mapping = 0; 2672 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2673 ring_mapping |= (i & 0xF) << (8 * (i & 0x3)); 2674 if ((i & 0x3) == 0x3) { 2675 switch (hw->mac.type) { 2676 case ixgbe_mac_82598EB: 2677 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 2678 ring_mapping); 2679 break; 2680 2681 case ixgbe_mac_82599EB: 2682 case ixgbe_mac_X540: 2683 case ixgbe_mac_X550: 2684 case ixgbe_mac_X550EM_x: 2685 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 2686 ring_mapping); 2687 break; 2688 2689 default: 2690 break; 2691 } 2692 2693 ring_mapping = 0; 2694 } 2695 } 2696 if (i & 0x3) { 2697 switch (hw->mac.type) { 2698 case ixgbe_mac_82598EB: 2699 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping); 2700 break; 2701 2702 case ixgbe_mac_82599EB: 2703 case ixgbe_mac_X540: 2704 case ixgbe_mac_X550: 2705 case ixgbe_mac_X550EM_x: 2706 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping); 2707 break; 2708 2709 default: 2710 break; 2711 } 2712 } 2713 2714 /* 2715 * Enable CRC appending and TX padding (for short tx frames) 2716 */ 2717 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2718 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN; 2719 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2720 2721 /* 2722 * enable DMA for 82599, X540 and X550 parts 2723 */ 2724 if (hw->mac.type == ixgbe_mac_82599EB || 2725 hw->mac.type == ixgbe_mac_X540 || 2726 hw->mac.type == ixgbe_mac_X550 || 2727 hw->mac.type == ixgbe_mac_X550EM_x) { 2728 /* DMATXCTL.TE must be set after all Tx config is complete */ 2729 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2730 reg_val |= IXGBE_DMATXCTL_TE; 2731 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val); 2732 2733 /* Disable arbiter to set MTQC */ 2734 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2735 reg_val |= IXGBE_RTTDCS_ARBDIS; 2736 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2737 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2738 reg_val &= ~IXGBE_RTTDCS_ARBDIS; 2739 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2740 } 2741 2742 /* 2743 * Enabling tx queues .. 
2744 * For 82599 must be done after DMATXCTL.TE is set 2745 */ 2746 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2747 tx_ring = &ixgbe->tx_rings[i]; 2748 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index)); 2749 reg_val |= IXGBE_TXDCTL_ENABLE; 2750 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val); 2751 } 2752 } 2753 2754 /* 2755 * ixgbe_setup_rss - Setup receive-side scaling feature. 2756 */ 2757 static void 2758 ixgbe_setup_rss(ixgbe_t *ixgbe) 2759 { 2760 struct ixgbe_hw *hw = &ixgbe->hw; 2761 uint32_t mrqc; 2762 2763 /* 2764 * Initialize RETA/ERETA table 2765 */ 2766 ixgbe_setup_rss_table(ixgbe); 2767 2768 /* 2769 * Enable RSS & perform hash on these packet types 2770 */ 2771 mrqc = IXGBE_MRQC_RSSEN | 2772 IXGBE_MRQC_RSS_FIELD_IPV4 | 2773 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2774 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2775 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2776 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2777 IXGBE_MRQC_RSS_FIELD_IPV6 | 2778 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2779 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2780 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2781 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2782 } 2783 2784 /* 2785 * ixgbe_setup_vmdq - Setup MAC classification feature 2786 */ 2787 static void 2788 ixgbe_setup_vmdq(ixgbe_t *ixgbe) 2789 { 2790 struct ixgbe_hw *hw = &ixgbe->hw; 2791 uint32_t vmdctl, i, vtctl; 2792 2793 /* 2794 * Setup the VMDq Control register, enable VMDq based on 2795 * packet destination MAC address: 2796 */ 2797 switch (hw->mac.type) { 2798 case ixgbe_mac_82598EB: 2799 /* 2800 * VMDq Enable = 1; 2801 * VMDq Filter = 0; MAC filtering 2802 * Default VMDq output index = 0; 2803 */ 2804 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2805 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2806 break; 2807 2808 case ixgbe_mac_82599EB: 2809 case ixgbe_mac_X540: 2810 case ixgbe_mac_X550: 2811 case ixgbe_mac_X550EM_x: 2812 /* 2813 * Enable VMDq-only. 2814 */ 2815 vmdctl = IXGBE_MRQC_VMDQEN; 2816 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl); 2817 2818 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2819 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2820 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2821 } 2822 2823 /* 2824 * Enable Virtualization and Replication. 2825 */ 2826 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2827 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2828 2829 /* 2830 * Enable receiving packets to all VFs 2831 */ 2832 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2833 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2834 break; 2835 2836 default: 2837 break; 2838 } 2839 } 2840 2841 /* 2842 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature. 
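 *
 * On the 82599-class MACs handled below, the pool/queue split is either
 * 32 pools with up to 4 RSS queues each (VMDQRSS32EN) or 64 pools with
 * up to 2 queues each (VMDQRSS64EN), chosen from num_rx_groups.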
2843 */ 2844 static void 2845 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe) 2846 { 2847 struct ixgbe_hw *hw = &ixgbe->hw; 2848 uint32_t i, mrqc; 2849 uint32_t vtctl, vmdctl; 2850 2851 /* 2852 * Initialize RETA/ERETA table 2853 */ 2854 ixgbe_setup_rss_table(ixgbe); 2855 2856 /* 2857 * Enable and setup RSS and VMDq 2858 */ 2859 switch (hw->mac.type) { 2860 case ixgbe_mac_82598EB: 2861 /* 2862 * Enable RSS & Setup RSS Hash functions 2863 */ 2864 mrqc = IXGBE_MRQC_RSSEN | 2865 IXGBE_MRQC_RSS_FIELD_IPV4 | 2866 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2867 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2868 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2869 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2870 IXGBE_MRQC_RSS_FIELD_IPV6 | 2871 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2872 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2873 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2874 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2875 2876 /* 2877 * Enable and Setup VMDq 2878 * VMDq Filter = 0; MAC filtering 2879 * Default VMDq output index = 0; 2880 */ 2881 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2882 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2883 break; 2884 2885 case ixgbe_mac_82599EB: 2886 case ixgbe_mac_X540: 2887 case ixgbe_mac_X550: 2888 case ixgbe_mac_X550EM_x: 2889 /* 2890 * Enable RSS & Setup RSS Hash functions 2891 */ 2892 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 | 2893 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2894 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2895 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2896 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2897 IXGBE_MRQC_RSS_FIELD_IPV6 | 2898 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2899 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2900 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2901 2902 /* 2903 * Enable VMDq+RSS. 2904 */ 2905 if (ixgbe->num_rx_groups > 32) { 2906 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN; 2907 } else { 2908 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN; 2909 } 2910 2911 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2912 2913 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2914 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2915 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2916 } 2917 break; 2918 2919 default: 2920 break; 2921 2922 } 2923 2924 if (hw->mac.type == ixgbe_mac_82599EB || 2925 hw->mac.type == ixgbe_mac_X540 || 2926 hw->mac.type == ixgbe_mac_X550 || 2927 hw->mac.type == ixgbe_mac_X550EM_x) { 2928 /* 2929 * Enable Virtualization and Replication. 2930 */ 2931 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2932 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2933 2934 /* 2935 * Enable receiving packets to all VFs 2936 */ 2937 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2938 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2939 } 2940 } 2941 2942 /* 2943 * ixgbe_setup_rss_table - Setup RSS table 2944 */ 2945 static void 2946 ixgbe_setup_rss_table(ixgbe_t *ixgbe) 2947 { 2948 struct ixgbe_hw *hw = &ixgbe->hw; 2949 uint32_t i, j; 2950 uint32_t random; 2951 uint32_t reta; 2952 uint32_t ring_per_group; 2953 uint32_t ring; 2954 uint32_t table_size; 2955 uint32_t index_mult; 2956 uint32_t rxcsum; 2957 2958 /* 2959 * Set multiplier for RETA setup and table size based on MAC type. 2960 * RETA table sizes vary by model: 2961 * 2962 * 82598, 82599, X540: 128 table entries. 2963 * X550: 512 table entries. 2964 */ 2965 index_mult = 0x1; 2966 table_size = 128; 2967 switch (ixgbe->hw.mac.type) { 2968 case ixgbe_mac_82598EB: 2969 index_mult = 0x11; 2970 break; 2971 case ixgbe_mac_X550: 2972 case ixgbe_mac_X550EM_x: 2973 table_size = 512; 2974 break; 2975 default: 2976 break; 2977 } 2978 2979 /* 2980 * Fill out RSS redirection table. The configuration of the indices is
 * 2983 * 82598: 8 bits wide containing two 4 bit RSS indices 2984 * 82599, X540: 8 bits wide containing one 4 bit RSS index 2985 * X550: 8 bits wide containing one 6 bit RSS index 2986 */ 2987 reta = 0; 2988 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2989 2990 for (i = 0, j = 0; i < table_size; i++, j++) { 2991 if (j == ring_per_group) j = 0; 2992 2993 /* 2994 * The low 8 bits are for hash value (n+0); 2995 * The next 8 bits are for hash value (n+1), etc. 2996 */ 2997 ring = (j * index_mult); 2998 reta = reta >> 8; 2999 reta = reta | (((uint32_t)ring) << 24); 3000 3001 if ((i & 3) == 3) { 3002 /* 3003 * The first 128 table entries are programmed into the 3004 * RETA register, with any beyond that (e.g. on X550) 3005 * into ERETA. 3006 */ 3007 if (i < 128) 3008 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 3009 else 3010 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 3011 reta); 3012 reta = 0; 3013 } 3014 } 3015 /* 3016 * Fill out hash function seeds with a random constant 3017 */ 3018 for (i = 0; i < 10; i++) { 3019 (void) random_get_pseudo_bytes((uint8_t *)&random, 3020 sizeof (uint32_t)); 3021 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 3022 } 3023 3024 /* 3025 * Disable Packet Checksum to enable RSS for multiple receive queues. 3026 * It is an adapter hardware limitation that Packet Checksum is 3027 * mutually exclusive with RSS. 3028 */ 3029 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3030 rxcsum |= IXGBE_RXCSUM_PCSD; 3031 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 3032 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3033 } 3034 3035 /* 3036 * ixgbe_init_unicst - Initialize the unicast addresses. 3037 */ 3038 static void 3039 ixgbe_init_unicst(ixgbe_t *ixgbe) 3040 { 3041 struct ixgbe_hw *hw = &ixgbe->hw; 3042 uint8_t *mac_addr; 3043 int slot; 3044 /* 3045 * Here we should consider two situations: 3046 * 3047 * 1. Chipset is initialized for the first time: 3048 * Clear all the multiple unicast addresses. 3049 * 3050 * 2. Chipset is reset: 3051 * Recover the multiple unicast addresses from the 3052 * software data structure to the RAR registers. 3053 */ 3054 if (!ixgbe->unicst_init) { 3055 /* 3056 * Initialize the multiple unicast addresses 3057 */ 3058 ixgbe->unicst_total = hw->mac.num_rar_entries; 3059 ixgbe->unicst_avail = ixgbe->unicst_total; 3060 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3061 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3062 bzero(mac_addr, ETHERADDRL); 3063 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL); 3064 ixgbe->unicst_addr[slot].mac.set = 0; 3065 } 3066 ixgbe->unicst_init = B_TRUE; 3067 } else { 3068 /* Re-configure the RAR registers */ 3069 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3070 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3071 if (ixgbe->unicst_addr[slot].mac.set == 1) { 3072 (void) ixgbe_set_rar(hw, slot, mac_addr, 3073 ixgbe->unicst_addr[slot].mac.group_index, 3074 IXGBE_RAH_AV); 3075 } else { 3076 bzero(mac_addr, ETHERADDRL); 3077 (void) ixgbe_set_rar(hw, slot, mac_addr, 3078 NULL, NULL); 3079 } 3080 } 3081 } 3082 } 3083 3084 /* 3085 * ixgbe_unicst_find - Find the slot for the specified unicast address 3086 */ 3087 int 3088 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr) 3089 { 3090 int slot; 3091 3092 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3093 3094 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3095 if (bcmp(ixgbe->unicst_addr[slot].mac.addr, 3096 mac_addr, ETHERADDRL) == 0) 3097 return (slot); 3098 } 3099 3100 return (-1); 3101 } 3102 3103 /* 3104 * ixgbe_multicst_add - Add a multicst address.
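 *
 * The low-order bit of the first address octet is the multicast (group)
 * bit: e.g. 01:00:5e:00:00:01 passes the check below, while a unicast
 * address such as 00:11:22:33:44:55 is rejected with EINVAL.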
3105 */ 3106 int 3107 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3108 { 3109 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3110 3111 if ((multiaddr[0] & 01) == 0) { 3112 return (EINVAL); 3113 } 3114 3115 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { 3116 return (ENOENT); 3117 } 3118 3119 bcopy(multiaddr, 3120 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL); 3121 ixgbe->mcast_count++; 3122 3123 /* 3124 * Update the multicast table in the hardware 3125 */ 3126 ixgbe_setup_multicst(ixgbe); 3127 3128 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3129 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3130 return (EIO); 3131 } 3132 3133 return (0); 3134 } 3135 3136 /* 3137 * ixgbe_multicst_remove - Remove a multicst address. 3138 */ 3139 int 3140 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3141 { 3142 int i; 3143 3144 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3145 3146 for (i = 0; i < ixgbe->mcast_count; i++) { 3147 if (bcmp(multiaddr, &ixgbe->mcast_table[i], 3148 ETHERADDRL) == 0) { 3149 for (i++; i < ixgbe->mcast_count; i++) { 3150 ixgbe->mcast_table[i - 1] = 3151 ixgbe->mcast_table[i]; 3152 } 3153 ixgbe->mcast_count--; 3154 break; 3155 } 3156 } 3157 3158 /* 3159 * Update the multicast table in the hardware 3160 */ 3161 ixgbe_setup_multicst(ixgbe); 3162 3163 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3164 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3165 return (EIO); 3166 } 3167 3168 return (0); 3169 } 3170 3171 /* 3172 * ixgbe_setup_multicst - Setup multicast data structures. 3173 * 3174 * This routine initializes all of the multicast related structures 3175 * and saves them in the hardware registers. 3176 */ 3177 static void 3178 ixgbe_setup_multicst(ixgbe_t *ixgbe) 3179 { 3180 uint8_t *mc_addr_list; 3181 uint32_t mc_addr_count; 3182 struct ixgbe_hw *hw = &ixgbe->hw; 3183 3184 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3185 3186 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES); 3187 3188 mc_addr_list = (uint8_t *)ixgbe->mcast_table; 3189 mc_addr_count = ixgbe->mcast_count; 3190 3191 /* 3192 * Update the multicast addresses to the MTA registers 3193 */ 3194 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 3195 ixgbe_mc_table_itr, TRUE); 3196 } 3197 3198 /* 3199 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode). 3200 * 3201 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers. 3202 * Different chipsets may have different allowed configurations of vmdq and rss. 3203 */ 3204 static void 3205 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe) 3206 { 3207 struct ixgbe_hw *hw = &ixgbe->hw; 3208 uint32_t ring_per_group; 3209 3210 switch (hw->mac.type) { 3211 case ixgbe_mac_82598EB: 3212 /* 3213 * 82598 supports the following combination: 3214 * vmdq no. x rss no. 3215 * [5..16] x 1 3216 * [1..4] x [1..16] 3217 * However 8 rss queues per pool (vmdq) are sufficient for 3218 * most cases. 3219 */ 3220 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3221 if (ixgbe->num_rx_groups > 4) { 3222 ixgbe->num_rx_rings = ixgbe->num_rx_groups; 3223 } else { 3224 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3225 min(8, ring_per_group); 3226 } 3227 3228 break; 3229 3230 case ixgbe_mac_82599EB: 3231 case ixgbe_mac_X540: 3232 case ixgbe_mac_X550: 3233 case ixgbe_mac_X550EM_x: 3234 /* 3235 * 82599 supports the following combination: 3236 * vmdq no. x rss no.
 * [33..64] x [1..2] 3238 * [2..32] x [1..4] 3239 * 1 x [1..16] 3240 * However 8 rss queues per pool (vmdq) are sufficient for 3241 * most cases. 3242 * 3243 * For now, treat X540 and X550 like the 82599. 3244 */ 3245 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3246 if (ixgbe->num_rx_groups == 1) { 3247 ixgbe->num_rx_rings = min(8, ring_per_group); 3248 } else if (ixgbe->num_rx_groups <= 32) { 3249 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3250 min(4, ring_per_group); 3251 } else if (ixgbe->num_rx_groups <= 64) { 3252 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3253 min(2, ring_per_group); 3254 } 3255 break; 3256 3257 default: 3258 break; 3259 } 3260 3261 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3262 3263 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) { 3264 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3265 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) { 3266 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ; 3267 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) { 3268 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS; 3269 } else { 3270 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS; 3271 } 3272 3273 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d", 3274 ixgbe->num_rx_groups, ixgbe->num_rx_rings); 3275 } 3276 3277 /* 3278 * ixgbe_get_conf - Get driver configurations set in driver.conf. 3279 * 3280 * This routine gets user-configured values out of the configuration 3281 * file ixgbe.conf. 3282 * 3283 * For each configurable value, there is a minimum, a maximum, and a 3284 * default. 3285 * If user does not configure a value, use the default. 3286 * If user configures below the minimum, use the minimum. 3287 * If user configures above the maximum, use the maximum. 3288 */ 3289 static void 3290 ixgbe_get_conf(ixgbe_t *ixgbe) 3291 { 3292 struct ixgbe_hw *hw = &ixgbe->hw; 3293 uint32_t flow_control; 3294 3295 /* 3296 * ixgbe driver supports the following user configurations: 3297 * 3298 * Jumbo frame configuration: 3299 * default_mtu 3300 * 3301 * Ethernet flow control configuration: 3302 * flow_control 3303 * 3304 * Multiple rings configurations: 3305 * tx_queue_number 3306 * tx_ring_size 3307 * rx_queue_number 3308 * rx_ring_size 3309 * 3310 * Call ixgbe_get_prop() to get the value for a specific 3311 * configuration parameter. 3312 */ 3313 3314 /* 3315 * Jumbo frame configuration - max_frame_size controls host buffer 3316 * allocation, so it includes the MTU, ethernet header, vlan tag and 3317 * frame check sequence. 3318 */ 3319 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, 3320 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU); 3321 3322 ixgbe->max_frame_size = ixgbe->default_mtu + 3323 sizeof (struct ether_vlan_header) + ETHERFCSL; 3324 3325 /* 3326 * Ethernet flow control configuration 3327 */ 3328 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL, 3329 ixgbe_fc_none, 3, ixgbe_fc_none); 3330 if (flow_control == 3) 3331 flow_control = ixgbe_fc_default; 3332 3333 /* 3334 * fc.requested_mode is what the user requests. After autoneg, 3335 * fc.current_mode will be the flow_control mode that was negotiated.
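 *
 * The property is a small integer; assuming the usual ixgbe_fc_mode
 * numbering, 0 = none, 1 = rx pause, 2 = tx pause, and the special value
 * 3 is mapped to ixgbe_fc_default above, i.e. the hardware default.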
3336 */ 3337 hw->fc.requested_mode = flow_control; 3338 3339 /* 3340 * Multiple rings configurations 3341 */ 3342 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM, 3343 ixgbe->capab->min_tx_que_num, 3344 ixgbe->capab->max_tx_que_num, 3345 ixgbe->capab->def_tx_que_num); 3346 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE, 3347 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 3348 3349 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM, 3350 ixgbe->capab->min_rx_que_num, 3351 ixgbe->capab->max_rx_que_num, 3352 ixgbe->capab->def_rx_que_num); 3353 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE, 3354 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 3355 3356 /* 3357 * Multiple groups configuration 3358 */ 3359 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM, 3360 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num, 3361 ixgbe->capab->def_rx_grp_num); 3362 3363 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE, 3364 0, 1, DEFAULT_MR_ENABLE); 3365 3366 if (ixgbe->mr_enable == B_FALSE) { 3367 ixgbe->num_tx_rings = 1; 3368 ixgbe->num_rx_rings = 1; 3369 ixgbe->num_rx_groups = 1; 3370 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3371 } else { 3372 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3373 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1); 3374 /* 3375 * The combination of num_rx_rings and num_rx_groups 3376 * may not be supported by h/w. We need to adjust 3377 * them to appropriate values. 3378 */ 3379 ixgbe_setup_vmdq_rss_conf(ixgbe); 3380 } 3381 3382 /* 3383 * Tunable used to force an interrupt type. The only use is 3384 * for testing of the lesser interrupt types. 3385 * 0 = don't force interrupt type 3386 * 1 = force interrupt type MSI-X 3387 * 2 = force interrupt type MSI 3388 * 3 = force interrupt type Legacy 3389 */ 3390 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE, 3391 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE); 3392 3393 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE, 3394 0, 1, DEFAULT_TX_HCKSUM_ENABLE); 3395 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE, 3396 0, 1, DEFAULT_RX_HCKSUM_ENABLE); 3397 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE, 3398 0, 1, DEFAULT_LSO_ENABLE); 3399 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE, 3400 0, 1, DEFAULT_LRO_ENABLE); 3401 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE, 3402 0, 1, DEFAULT_TX_HEAD_WB_ENABLE); 3403 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe, 3404 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE); 3405 3406 /* Head Write Back not recommended for 82599, X540 and X550 */ 3407 if (hw->mac.type == ixgbe_mac_82599EB || 3408 hw->mac.type == ixgbe_mac_X540 || 3409 hw->mac.type == ixgbe_mac_X550 || 3410 hw->mac.type == ixgbe_mac_X550EM_x) { 3411 ixgbe->tx_head_wb_enable = B_FALSE; 3412 } 3413 3414 /* 3415 * ixgbe LSO needs the tx h/w checksum support. 3416 * LSO will be disabled if tx h/w checksum is not 3417 * enabled. 3418 */ 3419 if (ixgbe->tx_hcksum_enable == B_FALSE) { 3420 ixgbe->lso_enable = B_FALSE; 3421 } 3422 3423 /* 3424 * ixgbe LRO needs the rx h/w checksum support. 3425 * LRO will be disabled if rx h/w checksum is not 3426 * enabled.
3427 */ 3428 if (ixgbe->rx_hcksum_enable == B_FALSE) { 3429 ixgbe->lro_enable = B_FALSE; 3430 } 3431 3432 /* 3433 * ixgbe LRO is only supported by 82599, X540 and X550 3434 */ 3435 if (hw->mac.type == ixgbe_mac_82598EB) { 3436 ixgbe->lro_enable = B_FALSE; 3437 } 3438 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD, 3439 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 3440 DEFAULT_TX_COPY_THRESHOLD); 3441 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe, 3442 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD, 3443 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD); 3444 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe, 3445 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD, 3446 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD); 3447 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe, 3448 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD, 3449 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD); 3450 3451 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD, 3452 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 3453 DEFAULT_RX_COPY_THRESHOLD); 3454 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR, 3455 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 3456 DEFAULT_RX_LIMIT_PER_INTR); 3457 3458 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING, 3459 ixgbe->capab->min_intr_throttle, 3460 ixgbe->capab->max_intr_throttle, 3461 ixgbe->capab->def_intr_throttle); 3462 /* 3463 * 82599, X540 and X550 require that the interrupt throttling rate be 3464 * a multiple of 8. This is enforced by the register definition. 3465 */ 3466 if (hw->mac.type == ixgbe_mac_82599EB || 3467 hw->mac.type == ixgbe_mac_X540 || 3468 hw->mac.type == ixgbe_mac_X550 || 3469 hw->mac.type == ixgbe_mac_X550EM_x) 3470 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8; 3471 3472 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe, 3473 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP); 3474 } 3475 3476 static void 3477 ixgbe_init_params(ixgbe_t *ixgbe) 3478 { 3479 struct ixgbe_hw *hw = &ixgbe->hw; 3480 ixgbe_link_speed speeds_supported = 0; 3481 boolean_t negotiate; 3482 3483 /* 3484 * Get a list of speeds the adapter supports. If the hw struct hasn't 3485 * been populated with this information yet, retrieve it from the 3486 * adapter and save it to our own variable. 3487 * 3488 * On certain adapters, such as ones which use SFPs, the contents of 3489 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not 3490 * updated, so we must rely on calling ixgbe_get_link_capabilities() 3491 * in order to ascertain the speeds which we are capable of supporting, 3492 * and in the case of SFP-equipped adapters, which speed we are 3493 * advertising. If ixgbe_get_link_capabilities() fails for some reason, 3494 * we'll go with a default list of speeds as a last resort. 3495 */ 3496 speeds_supported = hw->phy.speeds_supported; 3497 3498 if (speeds_supported == 0) { 3499 if (ixgbe_get_link_capabilities(hw, &speeds_supported, 3500 &negotiate) != IXGBE_SUCCESS) { 3501 if (hw->mac.type == ixgbe_mac_82598EB) { 3502 speeds_supported = 3503 IXGBE_LINK_SPEED_82598_AUTONEG; 3504 } else { 3505 speeds_supported = 3506 IXGBE_LINK_SPEED_82599_AUTONEG; 3507 } 3508 } 3509 } 3510 ixgbe->speeds_supported = speeds_supported; 3511 3512 /* 3513 * By default, all supported speeds are enabled and advertised.
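 *
 * For example, a 10GBASE-T adapter will typically report
 * IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL here, so both
 * the 10000fdx and 1000fdx en/adv parameters below start out as 1.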
3514 */ 3515 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) { 3516 ixgbe->param_en_10000fdx_cap = 1; 3517 ixgbe->param_adv_10000fdx_cap = 1; 3518 } else { 3519 ixgbe->param_en_10000fdx_cap = 0; 3520 ixgbe->param_adv_10000fdx_cap = 0; 3521 } 3522 3523 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) { 3524 ixgbe->param_en_5000fdx_cap = 1; 3525 ixgbe->param_adv_5000fdx_cap = 1; 3526 } else { 3527 ixgbe->param_en_5000fdx_cap = 0; 3528 ixgbe->param_adv_5000fdx_cap = 0; 3529 } 3530 3531 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) { 3532 ixgbe->param_en_2500fdx_cap = 1; 3533 ixgbe->param_adv_2500fdx_cap = 1; 3534 } else { 3535 ixgbe->param_en_2500fdx_cap = 0; 3536 ixgbe->param_adv_2500fdx_cap = 0; 3537 } 3538 3539 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) { 3540 ixgbe->param_en_1000fdx_cap = 1; 3541 ixgbe->param_adv_1000fdx_cap = 1; 3542 } else { 3543 ixgbe->param_en_1000fdx_cap = 0; 3544 ixgbe->param_adv_1000fdx_cap = 0; 3545 } 3546 3547 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) { 3548 ixgbe->param_en_100fdx_cap = 1; 3549 ixgbe->param_adv_100fdx_cap = 1; 3550 } else { 3551 ixgbe->param_en_100fdx_cap = 0; 3552 ixgbe->param_adv_100fdx_cap = 0; 3553 } 3554 3555 ixgbe->param_pause_cap = 1; 3556 ixgbe->param_asym_pause_cap = 1; 3557 ixgbe->param_rem_fault = 0; 3558 3559 ixgbe->param_adv_autoneg_cap = 1; 3560 ixgbe->param_adv_pause_cap = 1; 3561 ixgbe->param_adv_asym_pause_cap = 1; 3562 ixgbe->param_adv_rem_fault = 0; 3563 3564 ixgbe->param_lp_10000fdx_cap = 0; 3565 ixgbe->param_lp_5000fdx_cap = 0; 3566 ixgbe->param_lp_2500fdx_cap = 0; 3567 ixgbe->param_lp_1000fdx_cap = 0; 3568 ixgbe->param_lp_100fdx_cap = 0; 3569 ixgbe->param_lp_autoneg_cap = 0; 3570 ixgbe->param_lp_pause_cap = 0; 3571 ixgbe->param_lp_asym_pause_cap = 0; 3572 ixgbe->param_lp_rem_fault = 0; 3573 } 3574 3575 /* 3576 * ixgbe_get_prop - Get a property value out of the configuration file 3577 * ixgbe.conf. 3578 * 3579 * Caller provides the name of the property, a minimum value, a maximum 3580 * value, and a default value. 3581 * 3582 * Return configured value of the property, with default, minimum and 3583 * maximum properly applied. 3584 */ 3585 static int 3586 ixgbe_get_prop(ixgbe_t *ixgbe, 3587 char *propname, /* name of the property */ 3588 int minval, /* minimum acceptable value */ 3589 int maxval, /* maximum acceptable value */ 3590 int defval) /* default value */ 3591 { 3592 int value; 3593 3594 /* 3595 * Call ddi_prop_get_int() to read the conf settings 3596 */ 3597 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip, 3598 DDI_PROP_DONTPASS, propname, defval); 3599 if (value > maxval) 3600 value = maxval; 3601 3602 if (value < minval) 3603 value = minval; 3604 3605 return (value); 3606 } 3607 3608 /* 3609 * ixgbe_driver_setup_link - Use the link properties to setup the link. 3610 */ 3611 int 3612 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 3613 { 3614 struct ixgbe_hw *hw = &ixgbe->hw; 3615 ixgbe_link_speed advertised = 0; 3616 3617 /* 3618 * Assemble a list of enabled speeds to auto-negotiate with.
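 *
 * E.g. if only param_en_10000fdx_cap and param_en_1000fdx_cap are set,
 * advertised becomes
 * IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL.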
3619 */ 3620 if (ixgbe->param_en_10000fdx_cap == 1) 3621 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 3622 3623 if (ixgbe->param_en_5000fdx_cap == 1) 3624 advertised |= IXGBE_LINK_SPEED_5GB_FULL; 3625 3626 if (ixgbe->param_en_2500fdx_cap == 1) 3627 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; 3628 3629 if (ixgbe->param_en_1000fdx_cap == 1) 3630 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 3631 3632 if (ixgbe->param_en_100fdx_cap == 1) 3633 advertised |= IXGBE_LINK_SPEED_100_FULL; 3634 3635 /* 3636 * As a last resort, autoneg with a default list of speeds. 3637 */ 3638 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) { 3639 ixgbe_notice(ixgbe, "Invalid link settings. Setting link " 3640 "to autonegotiate with full capabilities."); 3641 3642 if (hw->mac.type == ixgbe_mac_82598EB) 3643 advertised = IXGBE_LINK_SPEED_82598_AUTONEG; 3644 else 3645 advertised = IXGBE_LINK_SPEED_82599_AUTONEG; 3646 } 3647 3648 if (setup_hw) { 3649 if (ixgbe_setup_link(&ixgbe->hw, advertised, 3650 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) { 3651 ixgbe_notice(ixgbe, "Setup link failed on this " 3652 "device."); 3653 return (IXGBE_FAILURE); 3654 } 3655 } 3656 3657 return (IXGBE_SUCCESS); 3658 } 3659 3660 /* 3661 * ixgbe_driver_link_check - Link status processing. 3662 * 3663 * This function can be called in both kernel context and interrupt context 3664 */ 3665 static void 3666 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3667 { 3668 struct ixgbe_hw *hw = &ixgbe->hw; 3669 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3670 boolean_t link_up = B_FALSE; 3671 boolean_t link_changed = B_FALSE; 3672 3673 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3674 3675 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3676 if (link_up) { 3677 ixgbe->link_check_complete = B_TRUE; 3678 3679 /* Link is up, enable flow control settings */ 3680 (void) ixgbe_fc_enable(hw); 3681 3682 /* 3683 * The Link is up, check whether it was marked as down earlier 3684 */ 3685 if (ixgbe->link_state != LINK_STATE_UP) { 3686 switch (speed) { 3687 case IXGBE_LINK_SPEED_10GB_FULL: 3688 ixgbe->link_speed = SPEED_10GB; 3689 break; 3690 case IXGBE_LINK_SPEED_5GB_FULL: 3691 ixgbe->link_speed = SPEED_5GB; 3692 break; 3693 case IXGBE_LINK_SPEED_2_5GB_FULL: 3694 ixgbe->link_speed = SPEED_2_5GB; 3695 break; 3696 case IXGBE_LINK_SPEED_1GB_FULL: 3697 ixgbe->link_speed = SPEED_1GB; 3698 break; 3699 case IXGBE_LINK_SPEED_100_FULL: 3700 ixgbe->link_speed = SPEED_100; 3701 } 3702 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3703 ixgbe->link_state = LINK_STATE_UP; 3704 link_changed = B_TRUE; 3705 } 3706 } else { 3707 if (ixgbe->link_check_complete == B_TRUE || 3708 (ixgbe->link_check_complete == B_FALSE && 3709 gethrtime() >= ixgbe->link_check_hrtime)) { 3710 /* 3711 * The link is really down 3712 */ 3713 ixgbe->link_check_complete = B_TRUE; 3714 3715 if (ixgbe->link_state != LINK_STATE_DOWN) { 3716 ixgbe->link_speed = 0; 3717 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3718 ixgbe->link_state = LINK_STATE_DOWN; 3719 link_changed = B_TRUE; 3720 } 3721 } 3722 } 3723 3724 /* 3725 * If we are in an interrupt context, need to re-enable the 3726 * interrupt, which was automasked 3727 */ 3728 if (servicing_interrupt() != 0) { 3729 ixgbe->eims |= IXGBE_EICR_LSC; 3730 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3731 } 3732 3733 if (link_changed) { 3734 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3735 } 3736 } 3737 3738 /* 3739 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599. 
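 *
 * Judging from the two handlers below: SDP1 indicates a link event on a
 * multispeed fiber port, SDP2 indicates SFP module insertion; both paths
 * finish with a link re-check and a refresh of the soft state.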
3740 */ 3741 static void 3742 ixgbe_sfp_check(void *arg) 3743 { 3744 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3745 uint32_t eicr = ixgbe->eicr; 3746 struct ixgbe_hw *hw = &ixgbe->hw; 3747 3748 mutex_enter(&ixgbe->gen_lock); 3749 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 3750 /* clear the interrupt */ 3751 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3752 3753 /* if link up, do multispeed fiber setup */ 3754 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3755 B_TRUE); 3756 ixgbe_driver_link_check(ixgbe); 3757 ixgbe_get_hw_state(ixgbe); 3758 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) { 3759 /* clear the interrupt */ 3760 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw)); 3761 3762 /* if link up, do sfp module setup */ 3763 (void) hw->mac.ops.setup_sfp(hw); 3764 3765 /* do multispeed fiber setup */ 3766 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3767 B_TRUE); 3768 ixgbe_driver_link_check(ixgbe); 3769 ixgbe_get_hw_state(ixgbe); 3770 } 3771 mutex_exit(&ixgbe->gen_lock); 3772 3773 /* 3774 * We need to fully re-check the link later. 3775 */ 3776 ixgbe->link_check_complete = B_FALSE; 3777 ixgbe->link_check_hrtime = gethrtime() + 3778 (IXGBE_LINK_UP_TIME * 100000000ULL); 3779 } 3780 3781 /* 3782 * ixgbe_overtemp_check - overtemp module processing done in taskq 3783 * 3784 * This routine will only be called on adapters with temperature sensor. 3785 * The indication of over-temperature can be either SDP0 interrupt or the link 3786 * status change interrupt. 3787 */ 3788 static void 3789 ixgbe_overtemp_check(void *arg) 3790 { 3791 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3792 struct ixgbe_hw *hw = &ixgbe->hw; 3793 uint32_t eicr = ixgbe->eicr; 3794 ixgbe_link_speed speed; 3795 boolean_t link_up; 3796 3797 mutex_enter(&ixgbe->gen_lock); 3798 3799 /* make sure we know current state of link */ 3800 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3801 3802 /* check over-temp condition */ 3803 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) || 3804 (eicr & IXGBE_EICR_LSC)) { 3805 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) { 3806 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3807 3808 /* 3809 * Disable the adapter interrupts 3810 */ 3811 ixgbe_disable_adapter_interrupts(ixgbe); 3812 3813 /* 3814 * Disable Rx/Tx units 3815 */ 3816 (void) ixgbe_stop_adapter(hw); 3817 3818 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3819 ixgbe_error(ixgbe, 3820 "Problem: Network adapter has been stopped " 3821 "because it has overheated"); 3822 ixgbe_error(ixgbe, 3823 "Action: Restart the computer. " 3824 "If the problem persists, power off the system " 3825 "and replace the adapter"); 3826 } 3827 } 3828 3829 /* write to clear the interrupt */ 3830 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 3831 3832 mutex_exit(&ixgbe->gen_lock); 3833 } 3834 3835 /* 3836 * ixgbe_phy_check - taskq to process interrupts from an external PHY 3837 * 3838 * This routine will only be called on adapters with external PHYs 3839 * (such as X550) that may be trying to raise our attention to some event. 3840 * Currently, this is limited to claiming PHY overtemperature and link status 3841 * change (LSC) events, however this may expand to include other things in 3842 * future adapters. 3843 */ 3844 static void 3845 ixgbe_phy_check(void *arg) 3846 { 3847 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3848 struct ixgbe_hw *hw = &ixgbe->hw; 3849 int rv; 3850 3851 mutex_enter(&ixgbe->gen_lock); 3852 3853 /* 3854 * X550 baseT PHY overtemp and LSC events are handled here. 
 * 3856 * If an overtemp event occurs, it will be reflected in the 3857 * return value of phy.ops.handle_lasi() and the common code will 3858 * automatically power off the baseT PHY. This is our cue to trigger 3859 * an FMA event. 3860 * 3861 * If a link status change event occurs, phy.ops.handle_lasi() will 3862 * automatically initiate a link setup between the integrated KR PHY 3863 * and the external X557 PHY to ensure that the link speed between 3864 * them matches the link speed of the baseT link. 3865 */ 3866 rv = ixgbe_handle_lasi(hw); 3867 3868 if (rv == IXGBE_ERR_OVERTEMP) { 3869 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3870 3871 /* 3872 * Disable the adapter interrupts 3873 */ 3874 ixgbe_disable_adapter_interrupts(ixgbe); 3875 3876 /* 3877 * Disable Rx/Tx units 3878 */ 3879 (void) ixgbe_stop_adapter(hw); 3880 3881 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3882 ixgbe_error(ixgbe, 3883 "Problem: Network adapter has been stopped due to an " 3884 "overtemperature event being detected."); 3885 ixgbe_error(ixgbe, 3886 "Action: Shut down or restart the computer. If the issue " 3887 "persists, please take action in accordance with the " 3888 "recommendations from your system vendor."); 3889 } 3890 3891 mutex_exit(&ixgbe->gen_lock); 3892 } 3893 3894 /* 3895 * ixgbe_link_timer - timer for link status detection 3896 */ 3897 static void 3898 ixgbe_link_timer(void *arg) 3899 { 3900 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3901 3902 mutex_enter(&ixgbe->gen_lock); 3903 ixgbe_driver_link_check(ixgbe); 3904 mutex_exit(&ixgbe->gen_lock); 3905 } 3906 3907 /* 3908 * ixgbe_local_timer - Driver watchdog function. 3909 * 3910 * This function will handle the transmit stall check and other routines. 3911 */ 3912 static void 3913 ixgbe_local_timer(void *arg) 3914 { 3915 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3916 3917 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP) 3918 goto out; 3919 3920 if (ixgbe->ixgbe_state & IXGBE_ERROR) { 3921 ixgbe->reset_count++; 3922 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3923 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3924 goto out; 3925 } 3926 3927 if (ixgbe_stall_check(ixgbe)) { 3928 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL); 3929 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3930 3931 ixgbe->reset_count++; 3932 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3933 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3934 } 3935 3936 out: 3937 ixgbe_restart_watchdog_timer(ixgbe); 3938 } 3939 3940 /* 3941 * ixgbe_stall_check - Check for transmit stall. 3942 * 3943 * This function checks if the adapter is stalled (in transmit). 3944 * 3945 * It is called each time the watchdog timeout is invoked. 3946 * If the transmit descriptor reclaim continuously fails, 3947 * the watchdog value will increment by 1. If the watchdog 3948 * value exceeds the threshold, the ixgbe is assumed to 3949 * have stalled and needs to be reset.
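 *
 * Example (hypothetical threshold): with STALL_WATCHDOG_TIMEOUT at 8 and
 * a one-second watchdog tick, a ring whose recycle_fail stays nonzero
 * for 8 consecutive ticks is declared stalled and the chip is reset.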
3950 */ 3951 static boolean_t 3952 ixgbe_stall_check(ixgbe_t *ixgbe) 3953 { 3954 ixgbe_tx_ring_t *tx_ring; 3955 boolean_t result; 3956 int i; 3957 3958 if (ixgbe->link_state != LINK_STATE_UP) 3959 return (B_FALSE); 3960 3961 /* 3962 * If any tx ring is stalled, we'll reset the chipset 3963 */ 3964 result = B_FALSE; 3965 for (i = 0; i < ixgbe->num_tx_rings; i++) { 3966 tx_ring = &ixgbe->tx_rings[i]; 3967 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) { 3968 tx_ring->tx_recycle(tx_ring); 3969 } 3970 3971 if (tx_ring->recycle_fail > 0) 3972 tx_ring->stall_watchdog++; 3973 else 3974 tx_ring->stall_watchdog = 0; 3975 3976 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 3977 result = B_TRUE; 3978 break; 3979 } 3980 } 3981 3982 if (result) { 3983 tx_ring->stall_watchdog = 0; 3984 tx_ring->recycle_fail = 0; 3985 } 3986 3987 return (result); 3988 } 3989 3990 3991 /* 3992 * is_valid_mac_addr - Check if the mac address is valid. 3993 */ 3994 static boolean_t 3995 is_valid_mac_addr(uint8_t *mac_addr) 3996 { 3997 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 3998 const uint8_t addr_test2[6] = 3999 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4000 4001 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4002 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4003 return (B_FALSE); 4004 4005 return (B_TRUE); 4006 } 4007 4008 static boolean_t 4009 ixgbe_find_mac_address(ixgbe_t *ixgbe) 4010 { 4011 #ifdef __sparc 4012 struct ixgbe_hw *hw = &ixgbe->hw; 4013 uchar_t *bytes; 4014 struct ether_addr sysaddr; 4015 uint_t nelts; 4016 int err; 4017 boolean_t found = B_FALSE; 4018 4019 /* 4020 * The "vendor's factory-set address" may already have 4021 * been extracted from the chip, but if the property 4022 * "local-mac-address" is set we use that instead. 4023 * 4024 * We check whether it looks like an array of 6 4025 * bytes (which it should, if OBP set it). If we can't 4026 * make sense of it this way, we'll ignore it. 4027 */ 4028 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4029 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 4030 if (err == DDI_PROP_SUCCESS) { 4031 if (nelts == ETHERADDRL) { 4032 while (nelts--) 4033 hw->mac.addr[nelts] = bytes[nelts]; 4034 found = B_TRUE; 4035 } 4036 ddi_prop_free(bytes); 4037 } 4038 4039 /* 4040 * Look up the OBP property "local-mac-address?". If the user has set 4041 * 'local-mac-address? = false', use "the system address" instead. 4042 */ 4043 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 4044 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 4045 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 4046 if (localetheraddr(NULL, &sysaddr) != 0) { 4047 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 4048 found = B_TRUE; 4049 } 4050 } 4051 ddi_prop_free(bytes); 4052 } 4053 4054 /* 4055 * Finally(!), if there's a valid "mac-address" property (created 4056 * if we netbooted from this interface), we must use this instead 4057 * of any of the above to ensure that the NFS/install server doesn't 4058 * get confused by the address changing as illumos takes over! 
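 *
 * In short, the precedence on SPARC is: "mac-address" (set when
 * netbooted) overrides "local-mac-address? = false" (the system
 * address), which overrides "local-mac-address" (the factory address),
 * because each later successful lookup overwrites hw->mac.addr.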
4059 */ 4060 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4061 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 4062 if (err == DDI_PROP_SUCCESS) { 4063 if (nelts == ETHERADDRL) { 4064 while (nelts--) 4065 hw->mac.addr[nelts] = bytes[nelts]; 4066 found = B_TRUE; 4067 } 4068 ddi_prop_free(bytes); 4069 } 4070 4071 if (found) { 4072 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 4073 return (B_TRUE); 4074 } 4075 #else 4076 _NOTE(ARGUNUSED(ixgbe)); 4077 #endif 4078 4079 return (B_TRUE); 4080 } 4081 4082 #pragma inline(ixgbe_arm_watchdog_timer) 4083 static void 4084 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 4085 { 4086 /* 4087 * Fire a watchdog timer 4088 */ 4089 ixgbe->watchdog_tid = 4090 timeout(ixgbe_local_timer, 4091 (void *)ixgbe, 1 * drv_usectohz(1000000)); 4092 4093 } 4094 4095 /* 4096 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 4097 */ 4098 void 4099 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 4100 { 4101 mutex_enter(&ixgbe->watchdog_lock); 4102 4103 if (!ixgbe->watchdog_enable) { 4104 ixgbe->watchdog_enable = B_TRUE; 4105 ixgbe->watchdog_start = B_TRUE; 4106 ixgbe_arm_watchdog_timer(ixgbe); 4107 } 4108 4109 mutex_exit(&ixgbe->watchdog_lock); 4110 } 4111 4112 /* 4113 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 4114 */ 4115 void 4116 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 4117 { 4118 timeout_id_t tid; 4119 4120 mutex_enter(&ixgbe->watchdog_lock); 4121 4122 ixgbe->watchdog_enable = B_FALSE; 4123 ixgbe->watchdog_start = B_FALSE; 4124 tid = ixgbe->watchdog_tid; 4125 ixgbe->watchdog_tid = 0; 4126 4127 mutex_exit(&ixgbe->watchdog_lock); 4128 4129 if (tid != 0) 4130 (void) untimeout(tid); 4131 } 4132 4133 /* 4134 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 4135 */ 4136 void 4137 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 4138 { 4139 mutex_enter(&ixgbe->watchdog_lock); 4140 4141 if (ixgbe->watchdog_enable) { 4142 if (!ixgbe->watchdog_start) { 4143 ixgbe->watchdog_start = B_TRUE; 4144 ixgbe_arm_watchdog_timer(ixgbe); 4145 } 4146 } 4147 4148 mutex_exit(&ixgbe->watchdog_lock); 4149 } 4150 4151 /* 4152 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 4153 */ 4154 static void 4155 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 4156 { 4157 mutex_enter(&ixgbe->watchdog_lock); 4158 4159 if (ixgbe->watchdog_start) 4160 ixgbe_arm_watchdog_timer(ixgbe); 4161 4162 mutex_exit(&ixgbe->watchdog_lock); 4163 } 4164 4165 /* 4166 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 4167 */ 4168 void 4169 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 4170 { 4171 timeout_id_t tid; 4172 4173 mutex_enter(&ixgbe->watchdog_lock); 4174 4175 ixgbe->watchdog_start = B_FALSE; 4176 tid = ixgbe->watchdog_tid; 4177 ixgbe->watchdog_tid = 0; 4178 4179 mutex_exit(&ixgbe->watchdog_lock); 4180 4181 if (tid != 0) 4182 (void) untimeout(tid); 4183 } 4184 4185 /* 4186 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 4187 */ 4188 static void 4189 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 4190 { 4191 struct ixgbe_hw *hw = &ixgbe->hw; 4192 4193 /* 4194 * mask all interrupts off 4195 */ 4196 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 4197 4198 /* 4199 * for MSI-X, also disable autoclear 4200 */ 4201 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4202 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 4203 } 4204 4205 IXGBE_WRITE_FLUSH(hw); 4206 } 4207 4208 /* 4209 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 
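 *
 * A note on the interrupt registers as used here (a hedged summary
 * based on this file's usage; the datasheet is authoritative): EIMS
 * bits enable causes, EIMC bits mask them, EIAC selects causes that
 * are auto-cleared on EICR read, EIAM selects causes auto-masked on
 * interrupt, and GPIE holds general-purpose control such as MSI-X
 * mode and EIAME. Ignoring the per-mac-type adjustments made below,
 * a minimal enable sketch is:
 *
 *	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_ENABLE_MASK);
 *	IXGBE_WRITE_FLUSH(hw);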
4210 */ 4211 static void 4212 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 4213 { 4214 struct ixgbe_hw *hw = &ixgbe->hw; 4215 uint32_t eiac, eiam; 4216 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4217 4218 /* interrupt types to enable */ 4219 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 4220 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 4221 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */ 4222 4223 /* enable automask on "other" causes that this adapter can generate */ 4224 eiam = ixgbe->capab->other_intr; 4225 4226 /* 4227 * msi-x mode 4228 */ 4229 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4230 /* enable autoclear but not on bits 29:20 */ 4231 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR); 4232 4233 /* general purpose interrupt enable */ 4234 gpie |= (IXGBE_GPIE_MSIX_MODE 4235 | IXGBE_GPIE_PBA_SUPPORT 4236 | IXGBE_GPIE_OCD 4237 | IXGBE_GPIE_EIAME); 4238 /* 4239 * non-msi-x mode 4240 */ 4241 } else { 4242 4243 /* disable autoclear, leave gpie at default */ 4244 eiac = 0; 4245 4246 /* 4247 * General purpose interrupt enable. 4248 * For 82599, X540 and X550, extended interrupt 4249 * automask enable only in MSI or MSI-X mode 4250 */ 4251 if ((hw->mac.type == ixgbe_mac_82598EB) || 4252 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) { 4253 gpie |= IXGBE_GPIE_EIAME; 4254 } 4255 } 4256 4257 /* Enable specific "other" interrupt types */ 4258 switch (hw->mac.type) { 4259 case ixgbe_mac_82598EB: 4260 gpie |= ixgbe->capab->other_gpie; 4261 break; 4262 4263 case ixgbe_mac_82599EB: 4264 case ixgbe_mac_X540: 4265 case ixgbe_mac_X550: 4266 case ixgbe_mac_X550EM_x: 4267 gpie |= ixgbe->capab->other_gpie; 4268 4269 /* Enable RSC Delay 8us when LRO enabled */ 4270 if (ixgbe->lro_enable) { 4271 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT); 4272 } 4273 break; 4274 4275 default: 4276 break; 4277 } 4278 4279 /* write to interrupt control registers */ 4280 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4281 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 4282 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam); 4283 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 4284 IXGBE_WRITE_FLUSH(hw); 4285 } 4286 4287 /* 4288 * ixgbe_loopback_ioctl - Loopback support. 
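 *
 * The protocol implemented below: LB_GET_INFO_SIZE returns the byte
 * count needed for the property list (three lb_property_t entries:
 * normal, mac, external), LB_GET_INFO fills in that list, and
 * LB_GET_MODE/LB_SET_MODE read or change the current mode. Every
 * request is validated against iocp->ioc_count before being honored.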
4289 */ 4290 enum ioc_reply 4291 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 4292 { 4293 lb_info_sz_t *lbsp; 4294 lb_property_t *lbpp; 4295 uint32_t *lbmp; 4296 uint32_t size; 4297 uint32_t value; 4298 4299 if (mp->b_cont == NULL) 4300 return (IOC_INVAL); 4301 4302 switch (iocp->ioc_cmd) { 4303 default: 4304 return (IOC_INVAL); 4305 4306 case LB_GET_INFO_SIZE: 4307 size = sizeof (lb_info_sz_t); 4308 if (iocp->ioc_count != size) 4309 return (IOC_INVAL); 4310 4311 value = sizeof (lb_normal); 4312 value += sizeof (lb_mac); 4313 value += sizeof (lb_external); 4314 4315 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 4316 *lbsp = value; 4317 break; 4318 4319 case LB_GET_INFO: 4320 value = sizeof (lb_normal); 4321 value += sizeof (lb_mac); 4322 value += sizeof (lb_external); 4323 4324 size = value; 4325 if (iocp->ioc_count != size) 4326 return (IOC_INVAL); 4327 4328 value = 0; 4329 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 4330 4331 lbpp[value++] = lb_normal; 4332 lbpp[value++] = lb_mac; 4333 lbpp[value++] = lb_external; 4334 break; 4335 4336 case LB_GET_MODE: 4337 size = sizeof (uint32_t); 4338 if (iocp->ioc_count != size) 4339 return (IOC_INVAL); 4340 4341 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4342 *lbmp = ixgbe->loopback_mode; 4343 break; 4344 4345 case LB_SET_MODE: 4346 size = 0; 4347 if (iocp->ioc_count != sizeof (uint32_t)) 4348 return (IOC_INVAL); 4349 4350 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4351 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 4352 return (IOC_INVAL); 4353 break; 4354 } 4355 4356 iocp->ioc_count = size; 4357 iocp->ioc_error = 0; 4358 4359 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4360 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4361 return (IOC_INVAL); 4362 } 4363 4364 return (IOC_REPLY); 4365 } 4366 4367 /* 4368 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 4369 */ 4370 static boolean_t 4371 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 4372 { 4373 if (mode == ixgbe->loopback_mode) 4374 return (B_TRUE); 4375 4376 ixgbe->loopback_mode = mode; 4377 4378 if (mode == IXGBE_LB_NONE) { 4379 /* 4380 * Reset the chip 4381 */ 4382 (void) ixgbe_reset(ixgbe); 4383 return (B_TRUE); 4384 } 4385 4386 mutex_enter(&ixgbe->gen_lock); 4387 4388 switch (mode) { 4389 default: 4390 mutex_exit(&ixgbe->gen_lock); 4391 return (B_FALSE); 4392 4393 case IXGBE_LB_EXTERNAL: 4394 break; 4395 4396 case IXGBE_LB_INTERNAL_MAC: 4397 ixgbe_set_internal_mac_loopback(ixgbe); 4398 break; 4399 } 4400 4401 mutex_exit(&ixgbe->gen_lock); 4402 4403 return (B_TRUE); 4404 } 4405 4406 /* 4407 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 
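 *
 * Loopback is entered by setting the LPBK bit in HLREG0; then, per
 * mac type, the 82598 powers down the Atlas transmit lanes so frames
 * stay off the wire, while 82599 and later force link-up via
 * AUTOC_FLU and re-run link setup at 10Gb full duplex.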
4408 */ 4409 static void 4410 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 4411 { 4412 struct ixgbe_hw *hw; 4413 uint32_t reg; 4414 uint8_t atlas; 4415 4416 hw = &ixgbe->hw; 4417 4418 /* 4419 * Setup MAC loopback 4420 */ 4421 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 4422 reg |= IXGBE_HLREG0_LPBK; 4423 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 4424 4425 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4426 reg &= ~IXGBE_AUTOC_LMS_MASK; 4427 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4428 4429 /* 4430 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 4431 */ 4432 switch (hw->mac.type) { 4433 case ixgbe_mac_82598EB: 4434 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4435 &atlas); 4436 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 4437 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4438 atlas); 4439 4440 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4441 &atlas); 4442 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 4443 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4444 atlas); 4445 4446 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4447 &atlas); 4448 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 4449 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4450 atlas); 4451 4452 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4453 &atlas); 4454 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 4455 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4456 atlas); 4457 break; 4458 4459 case ixgbe_mac_82599EB: 4460 case ixgbe_mac_X540: 4461 case ixgbe_mac_X550: 4462 case ixgbe_mac_X550EM_x: 4463 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4464 reg |= (IXGBE_AUTOC_FLU | 4465 IXGBE_AUTOC_10G_KX4); 4466 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4467 4468 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL, 4469 B_FALSE); 4470 break; 4471 4472 default: 4473 break; 4474 } 4475 } 4476 4477 #pragma inline(ixgbe_intr_rx_work) 4478 /* 4479 * ixgbe_intr_rx_work - RX processing of ISR. 4480 */ 4481 static void 4482 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 4483 { 4484 mblk_t *mp; 4485 4486 mutex_enter(&rx_ring->rx_lock); 4487 4488 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4489 mutex_exit(&rx_ring->rx_lock); 4490 4491 if (mp != NULL) 4492 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4493 rx_ring->ring_gen_num); 4494 } 4495 4496 #pragma inline(ixgbe_intr_tx_work) 4497 /* 4498 * ixgbe_intr_tx_work - TX processing of ISR. 
4499 */ 4500 static void 4501 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 4502 { 4503 ixgbe_t *ixgbe = tx_ring->ixgbe; 4504 4505 /* 4506 * Recycle the tx descriptors 4507 */ 4508 tx_ring->tx_recycle(tx_ring); 4509 4510 /* 4511 * Schedule the re-transmit 4512 */ 4513 if (tx_ring->reschedule && 4514 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) { 4515 tx_ring->reschedule = B_FALSE; 4516 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl, 4517 tx_ring->ring_handle); 4518 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 4519 } 4520 } 4521 4522 #pragma inline(ixgbe_intr_other_work) 4523 /* 4524 * ixgbe_intr_other_work - Process interrupt types other than tx/rx 4525 */ 4526 static void 4527 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr) 4528 { 4529 struct ixgbe_hw *hw = &ixgbe->hw; 4530 4531 ASSERT(mutex_owned(&ixgbe->gen_lock)); 4532 4533 /* 4534 * handle link status change 4535 */ 4536 if (eicr & IXGBE_EICR_LSC) { 4537 ixgbe_driver_link_check(ixgbe); 4538 ixgbe_get_hw_state(ixgbe); 4539 } 4540 4541 /* 4542 * check for fan failure on adapters with fans 4543 */ 4544 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 4545 (eicr & IXGBE_EICR_GPI_SDP1)) { 4546 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 4547 4548 /* 4549 * Disable the adapter interrupts 4550 */ 4551 ixgbe_disable_adapter_interrupts(ixgbe); 4552 4553 /* 4554 * Disable Rx/Tx units 4555 */ 4556 (void) ixgbe_stop_adapter(&ixgbe->hw); 4557 4558 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 4559 ixgbe_error(ixgbe, 4560 "Problem: Network adapter has been stopped " 4561 "because the fan has stopped.\n"); 4562 ixgbe_error(ixgbe, 4563 "Action: Replace the adapter.\n"); 4564 4565 /* re-enable the interrupt, which was automasked */ 4566 ixgbe->eims |= IXGBE_EICR_GPI_SDP1; 4567 } 4568 4569 /* 4570 * Do SFP check for adapters with hot-plug capability 4571 */ 4572 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) && 4573 ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) || 4574 (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) { 4575 ixgbe->eicr = eicr; 4576 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq, 4577 ixgbe_sfp_check, (void *)ixgbe, 4578 DDI_NOSLEEP)) != DDI_SUCCESS) { 4579 ixgbe_log(ixgbe, "No memory available to dispatch " 4580 "taskq for SFP check"); 4581 } 4582 } 4583 4584 /* 4585 * Do over-temperature check for adapters with temp sensor 4586 */ 4587 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) && 4588 ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) || 4589 (eicr & IXGBE_EICR_LSC))) { 4590 ixgbe->eicr = eicr; 4591 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq, 4592 ixgbe_overtemp_check, (void *)ixgbe, 4593 DDI_NOSLEEP)) != DDI_SUCCESS) { 4594 ixgbe_log(ixgbe, "No memory available to dispatch " 4595 "taskq for overtemp check"); 4596 } 4597 } 4598 4599 /* 4600 * Process an external PHY interrupt 4601 */ 4602 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 4603 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 4604 ixgbe->eicr = eicr; 4605 if ((ddi_taskq_dispatch(ixgbe->phy_taskq, 4606 ixgbe_phy_check, (void *)ixgbe, 4607 DDI_NOSLEEP)) != DDI_SUCCESS) { 4608 ixgbe_log(ixgbe, "No memory available to dispatch " 4609 "taskq for PHY check"); 4610 } 4611 } 4612 } 4613 4614 /* 4615 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 
4616 */ 4617 static uint_t 4618 ixgbe_intr_legacy(void *arg1, void *arg2) 4619 { 4620 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4621 struct ixgbe_hw *hw = &ixgbe->hw; 4622 ixgbe_tx_ring_t *tx_ring; 4623 ixgbe_rx_ring_t *rx_ring; 4624 uint32_t eicr; 4625 mblk_t *mp; 4626 boolean_t tx_reschedule; 4627 uint_t result; 4628 4629 _NOTE(ARGUNUSED(arg2)); 4630 4631 mutex_enter(&ixgbe->gen_lock); 4632 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 4633 mutex_exit(&ixgbe->gen_lock); 4634 return (DDI_INTR_UNCLAIMED); 4635 } 4636 4637 mp = NULL; 4638 tx_reschedule = B_FALSE; 4639 4640 /* 4641 * Any bit set in eicr: claim this interrupt 4642 */ 4643 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4644 4645 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4646 mutex_exit(&ixgbe->gen_lock); 4647 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4648 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4649 return (DDI_INTR_CLAIMED); 4650 } 4651 4652 if (eicr) { 4653 /* 4654 * For legacy interrupt, we have only one interrupt, 4655 * so we have only one rx ring and one tx ring enabled. 4656 */ 4657 ASSERT(ixgbe->num_rx_rings == 1); 4658 ASSERT(ixgbe->num_tx_rings == 1); 4659 4660 /* 4661 * For legacy interrupt, rx rings[0] will use RTxQ[0]. 4662 */ 4663 if (eicr & 0x1) { 4664 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE; 4665 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4666 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4667 /* 4668 * Clean the rx descriptors 4669 */ 4670 rx_ring = &ixgbe->rx_rings[0]; 4671 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4672 } 4673 4674 /* 4675 * For legacy interrupt, tx rings[0] will use RTxQ[1]. 4676 */ 4677 if (eicr & 0x2) { 4678 /* 4679 * Recycle the tx descriptors 4680 */ 4681 tx_ring = &ixgbe->tx_rings[0]; 4682 tx_ring->tx_recycle(tx_ring); 4683 4684 /* 4685 * Schedule the re-transmit 4686 */ 4687 tx_reschedule = (tx_ring->reschedule && 4688 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)); 4689 } 4690 4691 /* any interrupt type other than tx/rx */ 4692 if (eicr & ixgbe->capab->other_intr) { 4693 switch (hw->mac.type) { 4694 case ixgbe_mac_82598EB: 4695 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4696 break; 4697 4698 case ixgbe_mac_82599EB: 4699 case ixgbe_mac_X540: 4700 case ixgbe_mac_X550: 4701 case ixgbe_mac_X550EM_x: 4702 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4703 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4704 break; 4705 4706 default: 4707 break; 4708 } 4709 ixgbe_intr_other_work(ixgbe, eicr); 4710 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4711 } 4712 4713 mutex_exit(&ixgbe->gen_lock); 4714 4715 result = DDI_INTR_CLAIMED; 4716 } else { 4717 mutex_exit(&ixgbe->gen_lock); 4718 4719 /* 4720 * No interrupt cause bits set: don't claim this interrupt. 4721 */ 4722 result = DDI_INTR_UNCLAIMED; 4723 } 4724 4725 /* re-enable the interrupts which were automasked */ 4726 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4727 4728 /* 4729 * Do the following work outside of the gen_lock 4730 */ 4731 if (mp != NULL) { 4732 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4733 rx_ring->ring_gen_num); 4734 } 4735 4736 if (tx_reschedule) { 4737 tx_ring->reschedule = B_FALSE; 4738 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle); 4739 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 4740 } 4741 4742 return (result); 4743 } 4744 4745 /* 4746 * ixgbe_intr_msi - Interrupt handler for MSI. 
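 *
 * Unlike the legacy handler above, an MSI vector is not shared with
 * other devices, so this handler always returns DDI_INTR_CLAIMED
 * after servicing whatever cause bits it finds in EICR.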
4747 */ 4748 static uint_t 4749 ixgbe_intr_msi(void *arg1, void *arg2) 4750 { 4751 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4752 struct ixgbe_hw *hw = &ixgbe->hw; 4753 uint32_t eicr; 4754 4755 _NOTE(ARGUNUSED(arg2)); 4756 4757 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4758 4759 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4760 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4761 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4762 return (DDI_INTR_CLAIMED); 4763 } 4764 4765 /* 4766 * For MSI interrupt, we have only one vector, 4767 * so we have only one rx ring and one tx ring enabled. 4768 */ 4769 ASSERT(ixgbe->num_rx_rings == 1); 4770 ASSERT(ixgbe->num_tx_rings == 1); 4771 4772 /* 4773 * For MSI interrupt, rx rings[0] will use RTxQ[0]. 4774 */ 4775 if (eicr & 0x1) { 4776 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 4777 } 4778 4779 /* 4780 * For MSI interrupt, tx rings[0] will use RTxQ[1]. 4781 */ 4782 if (eicr & 0x2) { 4783 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 4784 } 4785 4786 /* any interrupt type other than tx/rx */ 4787 if (eicr & ixgbe->capab->other_intr) { 4788 mutex_enter(&ixgbe->gen_lock); 4789 switch (hw->mac.type) { 4790 case ixgbe_mac_82598EB: 4791 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4792 break; 4793 4794 case ixgbe_mac_82599EB: 4795 case ixgbe_mac_X540: 4796 case ixgbe_mac_X550: 4797 case ixgbe_mac_X550EM_x: 4798 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4799 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4800 break; 4801 4802 default: 4803 break; 4804 } 4805 ixgbe_intr_other_work(ixgbe, eicr); 4806 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4807 mutex_exit(&ixgbe->gen_lock); 4808 } 4809 4810 /* re-enable the interrupts which were automasked */ 4811 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4812 4813 return (DDI_INTR_CLAIMED); 4814 } 4815 4816 /* 4817 * ixgbe_intr_msix - Interrupt handler for MSI-X. 
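 *
 * Each MSI-X vector carries an ixgbe_intr_vector_t whose rx_map,
 * tx_map and other_map bitmaps record the rings (and the "other"
 * cause) assigned to it by ixgbe_map_intrs_to_vectors(); the handler
 * simply walks those bitmaps with bt_getlowbit().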
4818 */ 4819 static uint_t 4820 ixgbe_intr_msix(void *arg1, void *arg2) 4821 { 4822 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1; 4823 ixgbe_t *ixgbe = vect->ixgbe; 4824 struct ixgbe_hw *hw = &ixgbe->hw; 4825 uint32_t eicr; 4826 int r_idx = 0; 4827 4828 _NOTE(ARGUNUSED(arg2)); 4829 4830 /* 4831 * Clean each rx ring that has its bit set in the map 4832 */ 4833 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1)); 4834 while (r_idx >= 0) { 4835 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]); 4836 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 4837 (ixgbe->num_rx_rings - 1)); 4838 } 4839 4840 /* 4841 * Clean each tx ring that has its bit set in the map 4842 */ 4843 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1)); 4844 while (r_idx >= 0) { 4845 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]); 4846 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 4847 (ixgbe->num_tx_rings - 1)); 4848 } 4849 4850 4851 /* 4852 * Clean other interrupt (link change) that has its bit set in the map 4853 */ 4854 if (BT_TEST(vect->other_map, 0) == 1) { 4855 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4856 4857 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != 4858 DDI_FM_OK) { 4859 ddi_fm_service_impact(ixgbe->dip, 4860 DDI_SERVICE_DEGRADED); 4861 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4862 return (DDI_INTR_CLAIMED); 4863 } 4864 4865 /* 4866 * Check "other" cause bits: any interrupt type other than tx/rx 4867 */ 4868 if (eicr & ixgbe->capab->other_intr) { 4869 mutex_enter(&ixgbe->gen_lock); 4870 switch (hw->mac.type) { 4871 case ixgbe_mac_82598EB: 4872 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4873 ixgbe_intr_other_work(ixgbe, eicr); 4874 break; 4875 4876 case ixgbe_mac_82599EB: 4877 case ixgbe_mac_X540: 4878 case ixgbe_mac_X550: 4879 case ixgbe_mac_X550EM_x: 4880 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4881 ixgbe_intr_other_work(ixgbe, eicr); 4882 break; 4883 4884 default: 4885 break; 4886 } 4887 mutex_exit(&ixgbe->gen_lock); 4888 } 4889 4890 /* re-enable the interrupts which were automasked */ 4891 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4892 } 4893 4894 return (DDI_INTR_CLAIMED); 4895 } 4896 4897 /* 4898 * ixgbe_alloc_intrs - Allocate interrupts for the driver. 4899 * 4900 * Normal sequence is to try MSI-X; if not successful, try MSI; 4901 * if not successful, try Legacy. 4902 * ixgbe->intr_force can be used to force sequence to start with 4903 * any of the 3 types. 4904 * If MSI-X is not used, number of tx/rx rings is forced to 1.
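 *
 * The fallback below is equivalent to this sketch (pseudocode,
 * assuming intr_force permits each type):
 *
 *	if ((types & MSIX) && alloc(MSIX) == SUCCESS) return (SUCCESS);
 *	force rings/groups to 1;
 *	if ((types & MSI) && alloc(MSI) == SUCCESS) return (SUCCESS);
 *	if ((types & FIXED) && alloc(FIXED) == SUCCESS) return (SUCCESS);
 *	return (FAILURE);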
4905 */ 4906 static int 4907 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 4908 { 4909 dev_info_t *devinfo; 4910 int intr_types; 4911 int rc; 4912 4913 devinfo = ixgbe->dip; 4914 4915 /* 4916 * Get supported interrupt types 4917 */ 4918 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 4919 4920 if (rc != DDI_SUCCESS) { 4921 ixgbe_log(ixgbe, 4922 "Get supported interrupt types failed: %d", rc); 4923 return (IXGBE_FAILURE); 4924 } 4925 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 4926 4927 ixgbe->intr_type = 0; 4928 4929 /* 4930 * Install MSI-X interrupts 4931 */ 4932 if ((intr_types & DDI_INTR_TYPE_MSIX) && 4933 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 4934 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 4935 if (rc == IXGBE_SUCCESS) 4936 return (IXGBE_SUCCESS); 4937 4938 ixgbe_log(ixgbe, 4939 "Allocate MSI-X failed, trying MSI interrupts..."); 4940 } 4941 4942 /* 4943 * MSI-X not used, force rings and groups to 1 4944 */ 4945 ixgbe->num_rx_rings = 1; 4946 ixgbe->num_rx_groups = 1; 4947 ixgbe->num_tx_rings = 1; 4948 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 4949 ixgbe_log(ixgbe, 4950 "MSI-X not used, force rings and groups number to 1"); 4951 4952 /* 4953 * Install MSI interrupts 4954 */ 4955 if ((intr_types & DDI_INTR_TYPE_MSI) && 4956 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 4957 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 4958 if (rc == IXGBE_SUCCESS) 4959 return (IXGBE_SUCCESS); 4960 4961 ixgbe_log(ixgbe, 4962 "Allocate MSI failed, trying Legacy interrupts..."); 4963 } 4964 4965 /* 4966 * Install legacy interrupts 4967 */ 4968 if (intr_types & DDI_INTR_TYPE_FIXED) { 4969 /* 4970 * Disallow legacy interrupts for X550. X550 has a silicon 4971 * bug which prevents Shared Legacy interrupts from working. 4972 * For details, please reference: 4973 * 4974 * Intel Ethernet Controller X550 Specification Update rev. 2.1 4975 * May 2016, erratum 22: PCIe Interrupt Status Bit 4976 */ 4977 if (ixgbe->hw.mac.type == ixgbe_mac_X550 || 4978 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x || 4979 ixgbe->hw.mac.type == ixgbe_mac_X550_vf || 4980 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) { 4981 ixgbe_log(ixgbe, 4982 "Legacy interrupts are not supported on this " 4983 "adapter. Please use MSI or MSI-X instead."); 4984 return (IXGBE_FAILURE); 4985 } 4986 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 4987 if (rc == IXGBE_SUCCESS) 4988 return (IXGBE_SUCCESS); 4989 4990 ixgbe_log(ixgbe, 4991 "Allocate Legacy interrupts failed"); 4992 } 4993 4994 /* 4995 * If none of the 3 types succeeded, return failure 4996 */ 4997 return (IXGBE_FAILURE); 4998 } 4999 5000 /* 5001 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 5002 * 5003 * For legacy and MSI, only 1 handle is needed. For MSI-X, 5004 * if fewer than 2 handles are available, return failure. 5005 * Upon success, this maps the vectors to rx and tx rings for 5006 * interrupts. 
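 *
 * For example (illustrative numbers): with 8 rx and 8 tx rings, the
 * MSI-X request below is min(16, 8 + 8) = 16, further capped by the
 * adapter's capab->max_ring_vect. If ddi_intr_alloc() grants fewer
 * vectors than there are rx rings per group, the rx ring count is
 * scaled down via ixgbe_setup_vmdq_rss_conf().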
5007 */ 5008 static int 5009 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 5010 { 5011 dev_info_t *devinfo; 5012 int request, count, actual; 5013 int minimum; 5014 int rc; 5015 uint32_t ring_per_group; 5016 5017 devinfo = ixgbe->dip; 5018 5019 switch (intr_type) { 5020 case DDI_INTR_TYPE_FIXED: 5021 request = 1; /* Request 1 legacy interrupt handle */ 5022 minimum = 1; 5023 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 5024 break; 5025 5026 case DDI_INTR_TYPE_MSI: 5027 request = 1; /* Request 1 MSI interrupt handle */ 5028 minimum = 1; 5029 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 5030 break; 5031 5032 case DDI_INTR_TYPE_MSIX: 5033 /* 5034 * Best number of vectors for the adapter is 5035 * (# rx rings + # tx rings), however we will 5036 * limit the request number. 5037 */ 5038 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings); 5039 if (request > ixgbe->capab->max_ring_vect) 5040 request = ixgbe->capab->max_ring_vect; 5041 minimum = 1; 5042 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 5043 break; 5044 5045 default: 5046 ixgbe_log(ixgbe, 5047 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 5048 intr_type); 5049 return (IXGBE_FAILURE); 5050 } 5051 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 5052 request, minimum); 5053 5054 /* 5055 * Get number of supported interrupts 5056 */ 5057 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 5058 if ((rc != DDI_SUCCESS) || (count < minimum)) { 5059 ixgbe_log(ixgbe, 5060 "Get interrupt number failed. Return: %d, count: %d", 5061 rc, count); 5062 return (IXGBE_FAILURE); 5063 } 5064 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 5065 5066 actual = 0; 5067 ixgbe->intr_cnt = 0; 5068 ixgbe->intr_cnt_max = 0; 5069 ixgbe->intr_cnt_min = 0; 5070 5071 /* 5072 * Allocate an array of interrupt handles 5073 */ 5074 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 5075 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 5076 5077 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 5078 request, &actual, DDI_INTR_ALLOC_NORMAL); 5079 if (rc != DDI_SUCCESS) { 5080 ixgbe_log(ixgbe, "Allocate interrupts failed. " 5081 "return: %d, request: %d, actual: %d", 5082 rc, request, actual); 5083 goto alloc_handle_fail; 5084 } 5085 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 5086 5087 /* 5088 * upper/lower limit of interrupts 5089 */ 5090 ixgbe->intr_cnt = actual; 5091 ixgbe->intr_cnt_max = request; 5092 ixgbe->intr_cnt_min = minimum; 5093 5094 /* 5095 * rss number per group should not exceed the rx interrupt number, 5096 * else need to adjust rx ring number. 5097 */ 5098 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5099 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0); 5100 if (actual < ring_per_group) { 5101 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual; 5102 ixgbe_setup_vmdq_rss_conf(ixgbe); 5103 } 5104 5105 /* 5106 * Now we know the actual number of vectors. Here we map the vector 5107 * to other, rx rings and tx ring. 
5108 */ 5109 if (actual < minimum) { 5110 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 5111 actual); 5112 goto alloc_handle_fail; 5113 } 5114 5115 /* 5116 * Get priority for first vector, assume remaining are all the same 5117 */ 5118 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 5119 if (rc != DDI_SUCCESS) { 5120 ixgbe_log(ixgbe, 5121 "Get interrupt priority failed: %d", rc); 5122 goto alloc_handle_fail; 5123 } 5124 5125 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 5126 if (rc != DDI_SUCCESS) { 5127 ixgbe_log(ixgbe, 5128 "Get interrupt cap failed: %d", rc); 5129 goto alloc_handle_fail; 5130 } 5131 5132 ixgbe->intr_type = intr_type; 5133 5134 return (IXGBE_SUCCESS); 5135 5136 alloc_handle_fail: 5137 ixgbe_rem_intrs(ixgbe); 5138 5139 return (IXGBE_FAILURE); 5140 } 5141 5142 /* 5143 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 5144 * 5145 * Before adding the interrupt handlers, the interrupt vectors have 5146 * been allocated, and the rx/tx rings have also been allocated. 5147 */ 5148 static int 5149 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 5150 { 5151 int vector = 0; 5152 int rc; 5153 5154 switch (ixgbe->intr_type) { 5155 case DDI_INTR_TYPE_MSIX: 5156 /* 5157 * Add interrupt handler for all vectors 5158 */ 5159 for (vector = 0; vector < ixgbe->intr_cnt; vector++) { 5160 /* 5161 * install pointer to vect_map[vector] 5162 */ 5163 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5164 (ddi_intr_handler_t *)ixgbe_intr_msix, 5165 (void *)&ixgbe->vect_map[vector], NULL); 5166 5167 if (rc != DDI_SUCCESS) { 5168 ixgbe_log(ixgbe, 5169 "Add interrupt handler failed. " 5170 "return: %d, vector: %d", rc, vector); 5171 for (vector--; vector >= 0; vector--) { 5172 (void) ddi_intr_remove_handler( 5173 ixgbe->htable[vector]); 5174 } 5175 return (IXGBE_FAILURE); 5176 } 5177 } 5178 5179 break; 5180 5181 case DDI_INTR_TYPE_MSI: 5182 /* 5183 * Add interrupt handlers for the only vector 5184 */ 5185 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5186 (ddi_intr_handler_t *)ixgbe_intr_msi, 5187 (void *)ixgbe, NULL); 5188 5189 if (rc != DDI_SUCCESS) { 5190 ixgbe_log(ixgbe, 5191 "Add MSI interrupt handler failed: %d", rc); 5192 return (IXGBE_FAILURE); 5193 } 5194 5195 break; 5196 5197 case DDI_INTR_TYPE_FIXED: 5198 /* 5199 * Add interrupt handlers for the only vector 5200 */ 5201 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5202 (ddi_intr_handler_t *)ixgbe_intr_legacy, 5203 (void *)ixgbe, NULL); 5204 5205 if (rc != DDI_SUCCESS) { 5206 ixgbe_log(ixgbe, 5207 "Add legacy interrupt handler failed: %d", rc); 5208 return (IXGBE_FAILURE); 5209 } 5210 5211 break; 5212 5213 default: 5214 return (IXGBE_FAILURE); 5215 } 5216 5217 return (IXGBE_SUCCESS); 5218 } 5219 5220 #pragma inline(ixgbe_map_rxring_to_vector) 5221 /* 5222 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 5223 */ 5224 static void 5225 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 5226 { 5227 /* 5228 * Set bit in map 5229 */ 5230 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 5231 5232 /* 5233 * Count bits set 5234 */ 5235 ixgbe->vect_map[v_idx].rxr_cnt++; 5236 5237 /* 5238 * Remember bit position 5239 */ 5240 ixgbe->rx_rings[r_idx].intr_vector = v_idx; 5241 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 5242 } 5243 5244 #pragma inline(ixgbe_map_txring_to_vector) 5245 /* 5246 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 
5247 */ 5248 static void 5249 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 5250 { 5251 /* 5252 * Set bit in map 5253 */ 5254 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 5255 5256 /* 5257 * Count bits set 5258 */ 5259 ixgbe->vect_map[v_idx].txr_cnt++; 5260 5261 /* 5262 * Remember bit position 5263 */ 5264 ixgbe->tx_rings[t_idx].intr_vector = v_idx; 5265 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 5266 } 5267 5268 /* 5269 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector 5270 * allocation register (IVAR). 5271 * cause: 5272 * -1 : other cause 5273 * 0 : rx 5274 * 1 : tx 5275 */ 5276 static void 5277 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector, 5278 int8_t cause) 5279 { 5280 struct ixgbe_hw *hw = &ixgbe->hw; 5281 u32 ivar, index; 5282 5283 switch (hw->mac.type) { 5284 case ixgbe_mac_82598EB: 5285 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5286 if (cause == -1) { 5287 cause = 0; 5288 } 5289 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5290 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5291 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3))); 5292 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3))); 5293 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5294 break; 5295 5296 case ixgbe_mac_82599EB: 5297 case ixgbe_mac_X540: 5298 case ixgbe_mac_X550: 5299 case ixgbe_mac_X550EM_x: 5300 if (cause == -1) { 5301 /* other causes */ 5302 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5303 index = (intr_alloc_entry & 1) * 8; 5304 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5305 ivar &= ~(0xFF << index); 5306 ivar |= (msix_vector << index); 5307 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5308 } else { 5309 /* tx or rx causes */ 5310 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5311 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5312 ivar = IXGBE_READ_REG(hw, 5313 IXGBE_IVAR(intr_alloc_entry >> 1)); 5314 ivar &= ~(0xFF << index); 5315 ivar |= (msix_vector << index); 5316 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5317 ivar); 5318 } 5319 break; 5320 5321 default: 5322 break; 5323 } 5324 } 5325 5326 /* 5327 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of 5328 * given interrupt vector allocation register (IVAR). 
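 *
 * IVAR layout, as used by ixgbe_setup_ivar() above and the enable/
 * disable variants here: on 82598 each 32-bit IVAR register holds
 * four 8-bit entries, so entry e with cause c lives in register
 * ((c * 64 + e) >> 2) & 0x1F at byte lane (e & 0x3); e.g. rx entry 5
 * (cause 0) is IVAR(1), byte 1. On 82599 and later, the rx/tx causes
 * of queue pair (e >> 1) share IVAR(e >> 1) at bit offset
 * 16 * (e & 1) + 8 * cause.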
5329 * cause: 5330 * -1 : other cause 5331 * 0 : rx 5332 * 1 : tx 5333 */ 5334 static void 5335 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5336 { 5337 struct ixgbe_hw *hw = &ixgbe->hw; 5338 u32 ivar, index; 5339 5340 switch (hw->mac.type) { 5341 case ixgbe_mac_82598EB: 5342 if (cause == -1) { 5343 cause = 0; 5344 } 5345 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5346 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5347 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 * 5348 (intr_alloc_entry & 0x3))); 5349 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5350 break; 5351 5352 case ixgbe_mac_82599EB: 5353 case ixgbe_mac_X540: 5354 case ixgbe_mac_X550: 5355 case ixgbe_mac_X550EM_x: 5356 if (cause == -1) { 5357 /* other causes */ 5358 index = (intr_alloc_entry & 1) * 8; 5359 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5360 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5361 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5362 } else { 5363 /* tx or rx causes */ 5364 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5365 ivar = IXGBE_READ_REG(hw, 5366 IXGBE_IVAR(intr_alloc_entry >> 1)); 5367 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5368 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5369 ivar); 5370 } 5371 break; 5372 5373 default: 5374 break; 5375 } 5376 } 5377 5378 /* 5379 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of 5380 * given interrupt vector allocation register (IVAR). 5381 * cause: 5382 * -1 : other cause 5383 * 0 : rx 5384 * 1 : tx 5385 */ 5386 static void 5387 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5388 { 5389 struct ixgbe_hw *hw = &ixgbe->hw; 5390 u32 ivar, index; 5391 5392 switch (hw->mac.type) { 5393 case ixgbe_mac_82598EB: 5394 if (cause == -1) { 5395 cause = 0; 5396 } 5397 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5398 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5399 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 * 5400 (intr_alloc_entry & 0x3))); 5401 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5402 break; 5403 5404 case ixgbe_mac_82599EB: 5405 case ixgbe_mac_X540: 5406 case ixgbe_mac_X550: 5407 case ixgbe_mac_X550EM_x: 5408 if (cause == -1) { 5409 /* other causes */ 5410 index = (intr_alloc_entry & 1) * 8; 5411 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5412 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5413 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5414 } else { 5415 /* tx or rx causes */ 5416 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5417 ivar = IXGBE_READ_REG(hw, 5418 IXGBE_IVAR(intr_alloc_entry >> 1)); 5419 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5420 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5421 ivar); 5422 } 5423 break; 5424 5425 default: 5426 break; 5427 } 5428 } 5429 5430 /* 5431 * Convert the rx ring index maintained by the driver to the rx ring index 5432 * in h/w.
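 *
 * For example (illustrative): in VMDQ mode on 82599-class adapters
 * each group owns a pair of hardware queues, so sw ring 3 maps to hw
 * ring 6. In VMDQ+RSS mode with 2 rings per group and at most 32
 * groups, sw ring 5 (group 2, offset 1) maps to hw ring 2 * 4 + 1 = 9.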
5433 */ 5434 static uint32_t 5435 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index) 5436 { 5437 5438 struct ixgbe_hw *hw = &ixgbe->hw; 5439 uint32_t rx_ring_per_group, hw_rx_index; 5440 5441 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS || 5442 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) { 5443 return (sw_rx_index); 5444 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) { 5445 switch (hw->mac.type) { 5446 case ixgbe_mac_82598EB: 5447 return (sw_rx_index); 5448 5449 case ixgbe_mac_82599EB: 5450 case ixgbe_mac_X540: 5451 case ixgbe_mac_X550: 5452 case ixgbe_mac_X550EM_x: 5453 return (sw_rx_index * 2); 5454 5455 default: 5456 break; 5457 } 5458 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) { 5459 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5460 5461 switch (hw->mac.type) { 5462 case ixgbe_mac_82598EB: 5463 hw_rx_index = (sw_rx_index / rx_ring_per_group) * 5464 16 + (sw_rx_index % rx_ring_per_group); 5465 return (hw_rx_index); 5466 5467 case ixgbe_mac_82599EB: 5468 case ixgbe_mac_X540: 5469 case ixgbe_mac_X550: 5470 case ixgbe_mac_X550EM_x: 5471 if (ixgbe->num_rx_groups > 32) { 5472 hw_rx_index = (sw_rx_index / 5473 rx_ring_per_group) * 2 + 5474 (sw_rx_index % rx_ring_per_group); 5475 } else { 5476 hw_rx_index = (sw_rx_index / 5477 rx_ring_per_group) * 4 + 5478 (sw_rx_index % rx_ring_per_group); 5479 } 5480 return (hw_rx_index); 5481 5482 default: 5483 break; 5484 } 5485 } 5486 5487 /* 5488 * Should never reach. Just to make compiler happy. 5489 */ 5490 return (sw_rx_index); 5491 } 5492 5493 /* 5494 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors. 5495 * 5496 * For MSI-X, the rx, tx and other interrupts are mapped to 5497 * vectors [0 .. (intr_cnt - 1)]. 5498 */ 5499 static int 5500 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe) 5501 { 5502 int i, vector = 0; 5503 5504 /* initialize vector map */ 5505 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 5506 for (i = 0; i < ixgbe->intr_cnt; i++) { 5507 ixgbe->vect_map[i].ixgbe = ixgbe; 5508 } 5509 5510 /* 5511 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0], 5512 * tx rings[0] on RTxQ[1]. 5513 */ 5514 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5515 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 5516 ixgbe_map_txring_to_vector(ixgbe, 0, 1); 5517 return (IXGBE_SUCCESS); 5518 } 5519 5520 /* 5521 * Interrupts/vectors mapping for MSI-X 5522 */ 5523 5524 /* 5525 * Map other interrupt to vector 0. 5526 * Set bit in map and count the bits set. 5527 */ 5528 BT_SET(ixgbe->vect_map[vector].other_map, 0); 5529 ixgbe->vect_map[vector].other_cnt++; 5530 5531 /* 5532 * Map rx ring interrupts to vectors 5533 */ 5534 for (i = 0; i < ixgbe->num_rx_rings; i++) { 5535 ixgbe_map_rxring_to_vector(ixgbe, i, vector); 5536 vector = (vector + 1) % ixgbe->intr_cnt; 5537 } 5538 5539 /* 5540 * Map tx ring interrupts to vectors 5541 */ 5542 for (i = 0; i < ixgbe->num_tx_rings; i++) { 5543 ixgbe_map_txring_to_vector(ixgbe, i, vector); 5544 vector = (vector + 1) % ixgbe->intr_cnt; 5545 } 5546 5547 return (IXGBE_SUCCESS); 5548 } 5549 5550 /* 5551 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
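 *
 * For example (illustrative): with 3 MSI-X vectors, 4 rx rings and 2
 * tx rings, the round-robin mapping in ixgbe_map_intrs_to_vectors()
 * yields
 *
 *	vector 0: other, rx0, rx3
 *	vector 1: rx1, tx0
 *	vector 2: rx2, tx1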
5552 * 5553 * This relies on ring/vector mapping already set up in the 5554 * vect_map[] structures 5555 */ 5556 static void 5557 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 5558 { 5559 struct ixgbe_hw *hw = &ixgbe->hw; 5560 ixgbe_intr_vector_t *vect; /* vector bitmap */ 5561 int r_idx; /* ring index */ 5562 int v_idx; /* vector index */ 5563 uint32_t hw_index; 5564 5565 /* 5566 * Clear any previous entries 5567 */ 5568 switch (hw->mac.type) { 5569 case ixgbe_mac_82598EB: 5570 for (v_idx = 0; v_idx < 25; v_idx++) 5571 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5572 break; 5573 5574 case ixgbe_mac_82599EB: 5575 case ixgbe_mac_X540: 5576 case ixgbe_mac_X550: 5577 case ixgbe_mac_X550EM_x: 5578 for (v_idx = 0; v_idx < 64; v_idx++) 5579 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5580 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0); 5581 break; 5582 5583 default: 5584 break; 5585 } 5586 5587 /* 5588 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and 5589 * tx rings[0] will use RTxQ[1]. 5590 */ 5591 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5592 ixgbe_setup_ivar(ixgbe, 0, 0, 0); 5593 ixgbe_setup_ivar(ixgbe, 0, 1, 1); 5594 return; 5595 } 5596 5597 /* 5598 * For MSI-X interrupt, "Other" is always on vector[0]. 5599 */ 5600 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1); 5601 5602 /* 5603 * For each interrupt vector, populate the IVAR table 5604 */ 5605 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 5606 vect = &ixgbe->vect_map[v_idx]; 5607 5608 /* 5609 * For each rx ring bit set 5610 */ 5611 r_idx = bt_getlowbit(vect->rx_map, 0, 5612 (ixgbe->num_rx_rings - 1)); 5613 5614 while (r_idx >= 0) { 5615 hw_index = ixgbe->rx_rings[r_idx].hw_index; 5616 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0); 5617 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 5618 (ixgbe->num_rx_rings - 1)); 5619 } 5620 5621 /* 5622 * For each tx ring bit set 5623 */ 5624 r_idx = bt_getlowbit(vect->tx_map, 0, 5625 (ixgbe->num_tx_rings - 1)); 5626 5627 while (r_idx >= 0) { 5628 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1); 5629 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 5630 (ixgbe->num_tx_rings - 1)); 5631 } 5632 } 5633 } 5634 5635 /* 5636 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 5637 */ 5638 static void 5639 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 5640 { 5641 int i; 5642 int rc; 5643 5644 for (i = 0; i < ixgbe->intr_cnt; i++) { 5645 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 5646 if (rc != DDI_SUCCESS) { 5647 IXGBE_DEBUGLOG_1(ixgbe, 5648 "Remove intr handler failed: %d", rc); 5649 } 5650 } 5651 } 5652 5653 /* 5654 * ixgbe_rem_intrs - Remove the allocated interrupts. 5655 */ 5656 static void 5657 ixgbe_rem_intrs(ixgbe_t *ixgbe) 5658 { 5659 int i; 5660 int rc; 5661 5662 for (i = 0; i < ixgbe->intr_cnt; i++) { 5663 rc = ddi_intr_free(ixgbe->htable[i]); 5664 if (rc != DDI_SUCCESS) { 5665 IXGBE_DEBUGLOG_1(ixgbe, 5666 "Free intr failed: %d", rc); 5667 } 5668 } 5669 5670 kmem_free(ixgbe->htable, ixgbe->intr_size); 5671 ixgbe->htable = NULL; 5672 } 5673 5674 /* 5675 * ixgbe_enable_intrs - Enable all the ddi interrupts. 
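 *
 * If the handles advertise DDI_INTR_FLAG_BLOCK (typically MSI/MSI-X),
 * all of them are enabled with one ddi_intr_block_enable() call;
 * otherwise each handle is enabled individually via ddi_intr_enable().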
5676 */ 5677 static int 5678 ixgbe_enable_intrs(ixgbe_t *ixgbe) 5679 { 5680 int i; 5681 int rc; 5682 5683 /* 5684 * Enable interrupts 5685 */ 5686 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5687 /* 5688 * Call ddi_intr_block_enable() for MSI 5689 */ 5690 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 5691 if (rc != DDI_SUCCESS) { 5692 ixgbe_log(ixgbe, 5693 "Enable block intr failed: %d", rc); 5694 return (IXGBE_FAILURE); 5695 } 5696 } else { 5697 /* 5698 * Call ddi_intr_enable() for Legacy/MSI non block enable 5699 */ 5700 for (i = 0; i < ixgbe->intr_cnt; i++) { 5701 rc = ddi_intr_enable(ixgbe->htable[i]); 5702 if (rc != DDI_SUCCESS) { 5703 ixgbe_log(ixgbe, 5704 "Enable intr failed: %d", rc); 5705 return (IXGBE_FAILURE); 5706 } 5707 } 5708 } 5709 5710 return (IXGBE_SUCCESS); 5711 } 5712 5713 /* 5714 * ixgbe_disable_intrs - Disable all the interrupts. 5715 */ 5716 static int 5717 ixgbe_disable_intrs(ixgbe_t *ixgbe) 5718 { 5719 int i; 5720 int rc; 5721 5722 /* 5723 * Disable all interrupts 5724 */ 5725 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5726 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 5727 if (rc != DDI_SUCCESS) { 5728 ixgbe_log(ixgbe, 5729 "Disable block intr failed: %d", rc); 5730 return (IXGBE_FAILURE); 5731 } 5732 } else { 5733 for (i = 0; i < ixgbe->intr_cnt; i++) { 5734 rc = ddi_intr_disable(ixgbe->htable[i]); 5735 if (rc != DDI_SUCCESS) { 5736 ixgbe_log(ixgbe, 5737 "Disable intr failed: %d", rc); 5738 return (IXGBE_FAILURE); 5739 } 5740 } 5741 } 5742 5743 return (IXGBE_SUCCESS); 5744 } 5745 5746 /* 5747 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 5748 */ 5749 static void 5750 ixgbe_get_hw_state(ixgbe_t *ixgbe) 5751 { 5752 struct ixgbe_hw *hw = &ixgbe->hw; 5753 ixgbe_link_speed speed = 0; 5754 boolean_t link_up = B_FALSE; 5755 uint32_t pcs1g_anlp = 0; 5756 5757 ASSERT(mutex_owned(&ixgbe->gen_lock)); 5758 ixgbe->param_lp_1000fdx_cap = 0; 5759 ixgbe->param_lp_100fdx_cap = 0; 5760 5761 /* check for link, don't wait */ 5762 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 5763 5764 /* 5765 * Update the observed Link Partner's capabilities. Not all adapters 5766 * can provide full information on the LP's capable speeds, so we 5767 * provide what we can. 5768 */ 5769 if (link_up) { 5770 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 5771 5772 ixgbe->param_lp_1000fdx_cap = 5773 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5774 ixgbe->param_lp_100fdx_cap = 5775 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5776 } 5777 5778 /* 5779 * Update GLD's notion of the adapter's currently advertised speeds. 5780 * Since the common code doesn't always record the current autonegotiate 5781 * settings in the phy struct for all parts (specifically, adapters with 5782 * SFPs) we first test to see if it is 0, and if so, we fall back to 5783 * using the adapter's speed capabilities which we saved during instance 5784 * init in ixgbe_init_params(). 5785 * 5786 * Adapters with SFPs will always be shown as advertising all of their 5787 * supported speeds, and adapters with baseT PHYs (where the phy struct 5788 * is maintained by the common code) will always have a factual view of 5789 * their currently-advertised speeds. In the case of SFPs, this is 5790 * acceptable as we default to advertising all speeds that the adapter 5791 * claims to support, and those properties are immutable; unlike on 5792 * baseT (copper) PHYs, where speeds can be enabled or disabled at will. 
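 *
 * For example (illustrative): if the resulting mask is
 * (IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL), then below
 * param_adv_10000fdx_cap and param_adv_1000fdx_cap are set to 1 while
 * the 5000/2500/100 variants are cleared.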
5793 */ 5794 speed = hw->phy.autoneg_advertised; 5795 if (speed == 0) 5796 speed = ixgbe->speeds_supported; 5797 5798 ixgbe->param_adv_10000fdx_cap = 5799 (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0; 5800 ixgbe->param_adv_5000fdx_cap = 5801 (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0; 5802 ixgbe->param_adv_2500fdx_cap = 5803 (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0; 5804 ixgbe->param_adv_1000fdx_cap = 5805 (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0; 5806 ixgbe->param_adv_100fdx_cap = 5807 (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0; 5808 } 5809 5810 /* 5811 * ixgbe_get_driver_control - Notify that driver is in control of device. 5812 */ 5813 static void 5814 ixgbe_get_driver_control(struct ixgbe_hw *hw) 5815 { 5816 uint32_t ctrl_ext; 5817 5818 /* 5819 * Notify firmware that driver is in control of device 5820 */ 5821 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5822 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 5823 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5824 } 5825 5826 /* 5827 * ixgbe_release_driver_control - Notify that driver is no longer in control 5828 * of device. 5829 */ 5830 static void 5831 ixgbe_release_driver_control(struct ixgbe_hw *hw) 5832 { 5833 uint32_t ctrl_ext; 5834 5835 /* 5836 * Notify firmware that driver is no longer in control of device 5837 */ 5838 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5839 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 5840 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5841 } 5842 5843 /* 5844 * ixgbe_atomic_reserve - Atomic decrease operation. 5845 */ 5846 int 5847 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 5848 { 5849 uint32_t oldval; 5850 uint32_t newval; 5851 5852 /* 5853 * ATOMICALLY 5854 */ 5855 do { 5856 oldval = *count_p; 5857 if (oldval < n) 5858 return (-1); 5859 newval = oldval - n; 5860 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 5861 5862 return (newval); 5863 } 5864 5865 /* 5866 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 5867 */ 5868 static uint8_t * 5869 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 5870 { 5871 uint8_t *addr = *upd_ptr; 5872 uint8_t *new_ptr; 5873 5874 _NOTE(ARGUNUSED(hw)); 5875 _NOTE(ARGUNUSED(vmdq)); 5876 5877 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 5878 *upd_ptr = new_ptr; 5879 return (addr); 5880 } 5881 5882 /* 5883 * FMA support 5884 */ 5885 int 5886 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 5887 { 5888 ddi_fm_error_t de; 5889 5890 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5891 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 5892 return (de.fme_status); 5893 } 5894 5895 int 5896 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 5897 { 5898 ddi_fm_error_t de; 5899 5900 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5901 return (de.fme_status); 5902 } 5903 5904 /* 5905 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 5906 */ 5907 static int 5908 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5909 { 5910 _NOTE(ARGUNUSED(impl_data)); 5911 /* 5912 * as the driver can always deal with an error in any dma or 5913 * access handle, we can just return the fme_status value. 
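 *
 * Returning err->fme_status lets the FMA framework decide how the
 * error was handled; this callback is registered against the dip in
 * ixgbe_fm_init() below via ddi_fm_handler_register().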
5914 */ 5915 pci_ereport_post(dip, err, NULL); 5916 return (err->fme_status); 5917 } 5918 5919 static void 5920 ixgbe_fm_init(ixgbe_t *ixgbe) 5921 { 5922 ddi_iblock_cookie_t iblk; 5923 int fma_dma_flag; 5924 5925 /* 5926 * Only register with IO Fault Services if we have some capability 5927 */ 5928 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 5929 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5930 } else { 5931 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5932 } 5933 5934 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 5935 fma_dma_flag = 1; 5936 } else { 5937 fma_dma_flag = 0; 5938 } 5939 5940 ixgbe_set_fma_flags(fma_dma_flag); 5941 5942 if (ixgbe->fm_capabilities) { 5943 5944 /* 5945 * Register capabilities with IO Fault Services 5946 */ 5947 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 5948 5949 /* 5950 * Initialize pci ereport capabilities if ereport capable 5951 */ 5952 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5953 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5954 pci_ereport_setup(ixgbe->dip); 5955 5956 /* 5957 * Register error callback if error callback capable 5958 */ 5959 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5960 ddi_fm_handler_register(ixgbe->dip, 5961 ixgbe_fm_error_cb, (void *)ixgbe); 5962 } 5963 } 5964 5965 static void 5966 ixgbe_fm_fini(ixgbe_t *ixgbe) 5967 { 5968 /* 5969 * Only unregister FMA capabilities if they are registered 5970 */ 5971 if (ixgbe->fm_capabilities) { 5972 5973 /* 5974 * Release any resources allocated by pci_ereport_setup() 5975 */ 5976 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5977 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5978 pci_ereport_teardown(ixgbe->dip); 5979 5980 /* 5981 * Un-register error callback if error callback capable 5982 */ 5983 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5984 ddi_fm_handler_unregister(ixgbe->dip); 5985 5986 /* 5987 * Unregister from IO Fault Service 5988 */ 5989 ddi_fm_fini(ixgbe->dip); 5990 } 5991 } 5992 5993 void 5994 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 5995 { 5996 uint64_t ena; 5997 char buf[FM_MAX_CLASS]; 5998 5999 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6000 ena = fm_ena_generate(0, FM_ENA_FMT1); 6001 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 6002 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 6003 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 6004 } 6005 } 6006 6007 static int 6008 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 6009 { 6010 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh; 6011 6012 mutex_enter(&rx_ring->rx_lock); 6013 rx_ring->ring_gen_num = mr_gen_num; 6014 mutex_exit(&rx_ring->rx_lock); 6015 return (0); 6016 } 6017 6018 /* 6019 * Get the global ring index from a ring index within a group. 6020 */ 6021 static int 6022 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex) 6023 { 6024 ixgbe_rx_ring_t *rx_ring; 6025 int i; 6026 6027 for (i = 0; i < ixgbe->num_rx_rings; i++) { 6028 rx_ring = &ixgbe->rx_rings[i]; 6029 if (rx_ring->group_index == gindex) 6030 rindex--; 6031 if (rindex < 0) 6032 return (i); 6033 } 6034 6035 return (-1); 6036 } 6037 6038 /* 6039 * Callback function for MAC layer to register all rings.
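 *
 * For rx rings the MAC layer passes a group-relative ring index, so
 * the callback first converts it to a global ring index with
 * ixgbe_get_rx_ring_index() before wiring up the ring entry points.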
6040 */ 6041 /* ARGSUSED */ 6042 void 6043 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index, 6044 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 6045 { 6046 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6047 mac_intr_t *mintr = &infop->mri_intr; 6048 6049 switch (rtype) { 6050 case MAC_RING_TYPE_RX: { 6051 /* 6052 * 'ring_index' is the ring index within the group. 6053 * Need to get the global ring index by searching in groups. 6054 */ 6055 int global_ring_index = ixgbe_get_rx_ring_index( 6056 ixgbe, group_index, ring_index); 6057 6058 ASSERT(global_ring_index >= 0); 6059 6060 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index]; 6061 rx_ring->ring_handle = rh; 6062 6063 infop->mri_driver = (mac_ring_driver_t)rx_ring; 6064 infop->mri_start = ixgbe_ring_start; 6065 infop->mri_stop = NULL; 6066 infop->mri_poll = ixgbe_ring_rx_poll; 6067 infop->mri_stat = ixgbe_rx_ring_stat; 6068 6069 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 6070 mintr->mi_enable = ixgbe_rx_ring_intr_enable; 6071 mintr->mi_disable = ixgbe_rx_ring_intr_disable; 6072 if (ixgbe->intr_type & 6073 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6074 mintr->mi_ddi_handle = 6075 ixgbe->htable[rx_ring->intr_vector]; 6076 } 6077 6078 break; 6079 } 6080 case MAC_RING_TYPE_TX: { 6081 ASSERT(group_index == -1); 6082 ASSERT(ring_index < ixgbe->num_tx_rings); 6083 6084 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index]; 6085 tx_ring->ring_handle = rh; 6086 6087 infop->mri_driver = (mac_ring_driver_t)tx_ring; 6088 infop->mri_start = NULL; 6089 infop->mri_stop = NULL; 6090 infop->mri_tx = ixgbe_ring_tx; 6091 infop->mri_stat = ixgbe_tx_ring_stat; 6092 if (ixgbe->intr_type & 6093 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6094 mintr->mi_ddi_handle = 6095 ixgbe->htable[tx_ring->intr_vector]; 6096 } 6097 break; 6098 } 6099 default: 6100 break; 6101 } 6102 } 6103 6104 /* 6105 * Callback function for MAC layer to register all groups. 6106 */ 6107 void 6108 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index, 6109 mac_group_info_t *infop, mac_group_handle_t gh) 6110 { 6111 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6112 6113 switch (rtype) { 6114 case MAC_RING_TYPE_RX: { 6115 ixgbe_rx_group_t *rx_group; 6116 6117 rx_group = &ixgbe->rx_groups[index]; 6118 rx_group->group_handle = gh; 6119 6120 infop->mgi_driver = (mac_group_driver_t)rx_group; 6121 infop->mgi_start = NULL; 6122 infop->mgi_stop = NULL; 6123 infop->mgi_addmac = ixgbe_addmac; 6124 infop->mgi_remmac = ixgbe_remmac; 6125 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups); 6126 6127 break; 6128 } 6129 case MAC_RING_TYPE_TX: 6130 break; 6131 default: 6132 break; 6133 } 6134 } 6135 6136 /* 6137 * Enable interrupt on the specified rx ring. 6138 */ 6139 int 6140 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh) 6141 { 6142 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 6143 ixgbe_t *ixgbe = rx_ring->ixgbe; 6144 int r_idx = rx_ring->index; 6145 int hw_r_idx = rx_ring->hw_index; 6146 int v_idx = rx_ring->intr_vector; 6147 6148 mutex_enter(&ixgbe->gen_lock); 6149 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 6150 mutex_exit(&ixgbe->gen_lock); 6151 /* 6152 * Simply return 0. 6153 * Interrupts are being adjusted. ixgbe_intr_adjust() 6154 * will eventually re-enable the interrupt when it's 6155 * done with the adjustment. 6156 */ 6157 return (0); 6158 } 6159 6160 /* 6161 * Enable the interrupt by setting the VAL bit of the given interrupt 6162 * vector allocation register (IVAR).
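 *
 * After the IVAR bit is set, EICS is also written to software-trigger
 * this vector; presumably this picks up any packets that arrived while
 * the ring interrupt was disabled (e.g. during polling), rather than
 * leaving them stranded until the next packet arrives.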
6163 */ 6164 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0); 6165 6166 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 6167 6168 /* 6169 * Trigger a Rx interrupt on this ring 6170 */ 6171 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx)); 6172 IXGBE_WRITE_FLUSH(&ixgbe->hw); 6173 6174 mutex_exit(&ixgbe->gen_lock); 6175 6176 return (0); 6177 } 6178 6179 /* 6180 * Disable interrupt on the specified rx ring. 6181 */ 6182 int 6183 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh) 6184 { 6185 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 6186 ixgbe_t *ixgbe = rx_ring->ixgbe; 6187 int r_idx = rx_ring->index; 6188 int hw_r_idx = rx_ring->hw_index; 6189 int v_idx = rx_ring->intr_vector; 6190 6191 mutex_enter(&ixgbe->gen_lock); 6192 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 6193 mutex_exit(&ixgbe->gen_lock); 6194 /* 6195 * Simply return 0. 6196 * In the rare case where an interrupt is being 6197 * disabled while interrupts are being adjusted, 6198 * we don't fail the operation. No interrupts will 6199 * be generated while they are adjusted, and 6200 * ixgbe_intr_adjust() will cause the interrupts 6201 * to be re-enabled once it completes. Note that 6202 * in this case, packets may be delivered to the 6203 * stack via interrupts before ixgbe_rx_ring_intr_enable() 6204 * is called again. This is acceptable since interrupt 6205 * adjustment is infrequent, and the stack will be 6206 * able to handle these packets. 6207 */ 6208 return (0); 6209 } 6210 6211 /* 6212 * Disable the interrupt by clearing the VAL bit of the given interrupt 6213 * vector allocation register (IVAR). 6214 */ 6215 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0); 6216 6217 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx); 6218 6219 mutex_exit(&ixgbe->gen_lock); 6220 6221 return (0); 6222 } 6223 6224 /* 6225 * Add a mac address. 6226 */ 6227 static int 6228 ixgbe_addmac(void *arg, const uint8_t *mac_addr) 6229 { 6230 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 6231 ixgbe_t *ixgbe = rx_group->ixgbe; 6232 struct ixgbe_hw *hw = &ixgbe->hw; 6233 int slot, i; 6234 6235 mutex_enter(&ixgbe->gen_lock); 6236 6237 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6238 mutex_exit(&ixgbe->gen_lock); 6239 return (ECANCELED); 6240 } 6241 6242 if (ixgbe->unicst_avail == 0) { 6243 /* no slots available */ 6244 mutex_exit(&ixgbe->gen_lock); 6245 return (ENOSPC); 6246 } 6247 6248 /* 6249 * The first ixgbe->num_rx_groups slots are reserved for each respective 6250 * group. The remaining slots are shared by all groups. When adding a 6251 * MAC address, the group's reserved slot is checked first, then the 6252 * shared slots are searched. 6253 */ 6254 slot = -1; 6255 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) { 6256 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) { 6257 if (ixgbe->unicst_addr[i].mac.set == 0) { 6258 slot = i; 6259 break; 6260 } 6261 } 6262 } else { 6263 slot = rx_group->index; 6264 } 6265 6266 if (slot == -1) { 6267 /* no slots available */ 6268 mutex_exit(&ixgbe->gen_lock); 6269 return (ENOSPC); 6270 } 6271 6272 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 6273 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr, 6274 rx_group->index, IXGBE_RAH_AV); 6275 ixgbe->unicst_addr[slot].mac.set = 1; 6276 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index; 6277 ixgbe->unicst_avail--; 6278 6279 mutex_exit(&ixgbe->gen_lock); 6280 6281 return (0); 6282 } 6283 6284 /* 6285 * Remove a mac address.
6286 */ 6287 static int 6288 ixgbe_remmac(void *arg, const uint8_t *mac_addr) 6289 { 6290 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 6291 ixgbe_t *ixgbe = rx_group->ixgbe; 6292 struct ixgbe_hw *hw = &ixgbe->hw; 6293 int slot; 6294 6295 mutex_enter(&ixgbe->gen_lock); 6296 6297 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6298 mutex_exit(&ixgbe->gen_lock); 6299 return (ECANCELED); 6300 } 6301 6302 slot = ixgbe_unicst_find(ixgbe, mac_addr); 6303 if (slot == -1) { 6304 mutex_exit(&ixgbe->gen_lock); 6305 return (EINVAL); 6306 } 6307 6308 if (ixgbe->unicst_addr[slot].mac.set == 0) { 6309 mutex_exit(&ixgbe->gen_lock); 6310 return (EINVAL); 6311 } 6312 6313 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 6314 (void) ixgbe_clear_rar(hw, slot); 6315 ixgbe->unicst_addr[slot].mac.set = 0; 6316 ixgbe->unicst_avail++; 6317 6318 mutex_exit(&ixgbe->gen_lock); 6319 6320 return (0); 6321 } 6322
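/*
 * Illustration (hypothetical numbers) of the unicast slot layout used
 * by ixgbe_addmac() and ixgbe_remmac() above, with num_rx_groups == 2
 * and unicst_total == 4:
 *
 *	slot 0: reserved for rx group 0 (filled first for group 0)
 *	slot 1: reserved for rx group 1 (filled first for group 1)
 *	slot 2: shared by all groups
 *	slot 3: shared by all groups
 *
 * A second address added to group 0 therefore lands in slot 2 (or 3),
 * and removing it clears the RAR entry and returns the slot to the
 * shared pool.
 */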