/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Joyent, Inc.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
 */

#include "ixgbe_sw.h"

static char ixgbe_ident[] = "Intel 10Gb Ethernet";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static int ixgbe_alloc_rx_data(ixgbe_t *);
static void ixgbe_free_rx_data(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_setup_vmdq(ixgbe_t *);
static void ixgbe_setup_vmdq_rss(ixgbe_t *);
static void ixgbe_setup_rss_table(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
static void ixgbe_get_conf(ixgbe_t *);
static void ixgbe_init_params(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static void ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_sfp_check(void *);
static void ixgbe_overtemp_check(void *);
static void ixgbe_phy_check(void *);
static void ixgbe_link_timer(void *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_msix(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static int ixgbe_addmac(void *, const uint8_t *);
static int ixgbe_remmac(void *, const uint8_t *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static int ixgbe_quiesce(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
static int ixgbe_intr_cb_register(ixgbe_t *);
static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

char *ixgbe_priv_props[] = {
	"_tx_copy_thresh",
	"_tx_recycle_thresh",
	"_tx_overload_thresh",
	"_tx_resched_thresh",
	"_rx_copy_thresh",
	"_rx_limit_per_intr",
	"_intr_throttling",
	"_adv_pause_cap",
	"_adv_asym_pause_cap",
	NULL
};

#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))

static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ixgbe_quiesce,		/* devo_quiesce */
};

static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ixgbe_ident,		/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
	normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};

#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop,
	ixgbe_m_propinfo
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	64,		/* default number of rx queues */
	16,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2),	/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE)	/* capability flags */
};

static adapter_info_t ixgbe_X540_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1_X540
	| IXGBE_EICR_GPI_SDP2_X540),	/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN_X540
	| IXGBE_SDP2_GPIEN_X540),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE)	/* capability flags */
};

static adapter_info_t ixgbe_X550_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	0x200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE)	/* capability flags */
};

/*
 * Module Initialization Functions.
 */

int
_init(void)
{
	int status;

	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

	status = mod_install(&ixgbe_modlinkage);

	if (status != DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&ixgbe_modlinkage);

	if (status == DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	status = mod_info(&ixgbe_modlinkage, modinfop);

	return (status);
}

/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device-specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	ixgbe_t *ixgbe;
	struct ixgbe_osdep *osdep;
	struct ixgbe_hw *hw;
	int instance;
	char taskqname[32];

	/*
	 * Check the command and perform corresponding operations
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (ixgbe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/* Get the device instance */
	instance = ddi_get_instance(devinfo);

	/* Allocate memory for the instance data structure */
	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

	ixgbe->dip = devinfo;
	ixgbe->instance = instance;

	hw = &ixgbe->hw;
	osdep = &ixgbe->osdep;
	hw->back = osdep;
	osdep->ixgbe = ixgbe;

	/* Attach the instance pointer to the dev_info data structure */
	ddi_set_driver_private(devinfo, ixgbe);

	/*
	 * Initialize for FMA support
	 */
	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	ixgbe_fm_init(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

	/*
	 * Map PCI config space registers
	 */
	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map PCI configurations");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

	/*
	 * Identify the chipset family
	 */
	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to identify hardware");
		goto attach_fail;
	}

	/*
	 * Map device registers
	 */
	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map device registers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

	/*
	 * Initialize driver parameters
	 */
	ixgbe_init_properties(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

	/*
	 * Register interrupt callback
	 */
	if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register interrupt callback");
		goto attach_fail;
	}

	/*
	 * Allocate interrupts
	 */
	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Allocate rx/tx rings based on the ring numbers.
	 * The actual numbers of rx/tx rings are decided by the number of
	 * allocated interrupt vectors, so we should allocate the rings after
	 * interrupts are allocated.
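	 *
	 * With MSI-X, the rings are then spread over the allocated vectors
	 * by ixgbe_map_intrs_to_vectors(); when fewer vectors than rings
	 * are available, a vector may end up servicing several rings.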
	 */
	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
		goto attach_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Create a taskq for sfp-change
	 */
	(void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
	if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "sfp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;

	/*
	 * Create a taskq for over-temp
	 */
	(void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
	if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "overtemp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;

	/*
	 * Create a taskq for processing external PHY interrupts
	 */
	(void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
	if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "phy_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;

	/*
	 * Initialize driver parameters
	 */
	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize driver settings");
		goto attach_fail;
	}

	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handler and registering
	 * the softint, to avoid the condition where the interrupt handler
	 * could use an uninitialized mutex.
	 */
	ixgbe_init_locks(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

	/*
	 * Initialize chipset hardware
	 */
	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize adapter");
		goto attach_fail;
	}
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	/*
	 * Initialize adapter capabilities
	 */
	ixgbe_init_params(ixgbe);

	/*
	 * Initialize statistics
	 */
	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize statistics");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

	/*
	 * Register the driver to the MAC
	 */
	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register MAC");
		goto attach_fail;
	}
	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

	ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
	    IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
	if (ixgbe->periodic_id == 0) {
		ixgbe_error(ixgbe, "Failed to add the link check timer");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

	ixgbe_log(ixgbe, "%s", ixgbe_ident);
	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);

	return (DDI_SUCCESS);

attach_fail:
	ixgbe_unconfigure(devinfo, ixgbe);
	return (DDI_FAILURE);
}

/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach(),
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_stop(ixgbe, B_TRUE);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	}

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
ixgbe_quiesce(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	struct ixgbe_hw *hw;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);

	if (ixgbe == NULL)
		return (DDI_FAILURE);

	hw = &ixgbe->hw;

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * Remove the link check timer
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
		if (ixgbe->periodic_id != NULL) {
			ddi_periodic_delete(ixgbe->periodic_id);
			ixgbe->periodic_id = NULL;
		}
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for sfp-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
		ddi_taskq_destroy(ixgbe->sfp_taskq);
	}

	/*
	 * Remove taskq for over-temp
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
		ddi_taskq_destroy(ixgbe->overtemp_taskq);
	}

	/*
	 * Remove taskq for external PHYs
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
		ddi_taskq_destroy(ixgbe->phy_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Unregister interrupt callback handler
	 */
	if (ixgbe->cb_hdl != NULL) {
		(void) ddi_cb_unregister(ixgbe->cb_hdl);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	mac_register_t *mac;
	int status;

	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
		return (IXGBE_FAILURE);

	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mac->m_driver = ixgbe;
	mac->m_dip = ixgbe->dip;
	mac->m_src_addr = hw->mac.addr;
	mac->m_callbacks = &ixgbe_m_callbacks;
	mac->m_min_sdu = 0;
	mac->m_max_sdu = ixgbe->default_mtu;
	mac->m_margin = VLAN_TAGSZ;
	mac->m_priv_props = ixgbe_priv_props;
	mac->m_v12n = MAC_VIRT_LEVEL1;

	status = mac_register(mac, &ixgbe->mac_hdl);

	mac_free(mac);

	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}

/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
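 *
 * The vendor and device ids read from PCI config space let the common
 * code derive the mac type, which in turn selects the adapter_info_t
 * capability structure that the rest of the driver consults.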
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
			ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
		}
		break;

	case ixgbe_mac_82599EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
			ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
		}
		break;

	case ixgbe_mac_X540:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
		ixgbe->capab = &ixgbe_X540_cap;
		/*
		 * For now, X540 is all set in its capab structure.
		 * As other X540 variants show up, things can change here.
		 */
		break;

	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
		ixgbe->capab = &ixgbe_X550_cap;

		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
			ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;

		/*
		 * Link detection on X552 SFP+ and X552/X557-AT
		 */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
			ixgbe->capab->other_intr |=
			    IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
		}
		break;

	default:
		IXGBE_DEBUGLOG_1(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
	dev_info_t *devinfo = ixgbe->dip;
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;
	off_t mem_size;

	/*
	 * First get the size of device registers to be mapped.
	 */
	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
	    != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Call ddi_regs_map_setup() to map registers
	 */
	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0,
	    mem_size, &ixgbe_regs_acc_attr,
	    &osdep->reg_handle)) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
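 *
 * Each property is fetched through ixgbe_get_prop(), which is given a
 * minimum, a maximum and a default value, so out-of-range settings in
 * ixgbe.conf do not reach the driver unchecked.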
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings,
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx ring settings, link state, and any other parameters that
 * need to be set up during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	dev_info_t *devinfo = ixgbe->dip;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_group_t *rx_group;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t rx_size;
	uint32_t tx_size;
	uint32_t ring_per_group;
	int i;

	/*
	 * Initialize chipset specific hardware function pointers
	 */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Get the system page size
	 */
	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

	/*
	 * Set rx buffer size
	 *
	 * The IP header alignment room is counted in the calculation.
	 * The rx buffer size is in units of 1K, as required by the
	 * chipset hardware.
	 */
	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
	ixgbe->rx_buf_size = ((rx_size >> 10) +
	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Set tx buffer size
	 */
	tx_size = ixgbe->max_frame_size;
	ixgbe->tx_buf_size = ((tx_size >> 10) +
	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Initialize rx/tx rings/groups parameters
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_ring->index = i;
		rx_ring->ixgbe = ixgbe;
		rx_ring->group_index = i / ring_per_group;
		rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
	}

	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		rx_group = &ixgbe->rx_groups[i];
		rx_group->index = i;
		rx_group->ixgbe = ixgbe;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		tx_ring->index = i;
		tx_ring->ixgbe = ixgbe;
		if (ixgbe->tx_head_wb_enable)
			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
		else
			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

		tx_ring->ring_size = ixgbe->tx_ring_size;
		tx_ring->free_list_size = ixgbe->tx_ring_size +
		    (ixgbe->tx_ring_size >> 1);
	}

	/*
	 * Initialize values of interrupt throttling rate
	 */
	for (i = 1; i < MAX_INTR_VECTOR; i++)
		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

	/*
	 * The initial link state should be "unknown"
	 */
	ixgbe->link_state = LINK_STATE_UNKNOWN;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
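 *
 * All mutexes are created as MUTEX_DRIVER at the interrupt priority
 * obtained from the DDI framework, so they may be acquired both at base
 * level and from the interrupt handlers.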
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_init(&rx_ring->rx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_init(&tx_ring->tx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->recycle_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_head_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_tail_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	mutex_init(&ixgbe->gen_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

	mutex_init(&ixgbe->watchdog_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}

/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_destroy(&rx_ring->rx_lock);
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_destroy(&tx_ring->tx_lock);
		mutex_destroy(&tx_ring->recycle_lock);
		mutex_destroy(&tx_ring->tcb_head_lock);
		mutex_destroy(&tx_ring->tcb_tail_lock);
	}

	mutex_destroy(&ixgbe->gen_lock);
	mutex_destroy(&ixgbe->watchdog_lock);
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	int i;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_SUCCESS);
	}
	ixgbe_stop(ixgbe, B_FALSE);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	return (DDI_SUCCESS);
}

/*
 * ixgbe_init - Initialize the device.
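 *
 * Brings the adapter through the shared-code ixgbe_init_hw() sequence,
 * validates the NVM checksum, programs the default flow control
 * thresholds and performs the initial link setup, all under gen_lock.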
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u8 pbanum[IXGBE_PBANUM_LENGTH];
	int rv;

	mutex_enter(&ixgbe->gen_lock);

	/*
	 * Configure/Initialize hardware
	 */
	rv = ixgbe_init_hw(hw);
	if (rv != IXGBE_SUCCESS) {
		switch (rv) {

		/*
		 * The first three errors are not prohibitive to us progressing
		 * further, and are mainly advisory in nature. In the case of an
		 * SFP module not being present or not deemed supported by the
		 * common code, we advise the operator of this fact but carry on
		 * instead of failing hard, as SFPs can be inserted or replaced
		 * while the driver is running. In the case of an unknown error,
		 * we fail hard, logging the reason and emitting a FMA event.
		 */
		case IXGBE_ERR_EEPROM_VERSION:
			ixgbe_error(ixgbe,
			    "This Intel 10Gb Ethernet device is pre-release and"
			    " contains outdated firmware. Please contact your"
			    " hardware vendor for a replacement.");
			break;
		case IXGBE_ERR_SFP_NOT_PRESENT:
			ixgbe_error(ixgbe,
			    "No SFP+ module detected on this interface. Please "
			    "install a supported SFP+ module for this "
			    "interface to become operational.");
			break;
		case IXGBE_ERR_SFP_NOT_SUPPORTED:
			ixgbe_error(ixgbe,
			    "Unsupported SFP+ module detected. Please replace "
			    "it with a supported SFP+ module per Intel "
			    "documentation, or bypass this check with "
			    "allow_unsupported_sfp=1 in ixgbe.conf.");
			break;
		default:
			ixgbe_error(ixgbe,
			    "Failed to initialize hardware. ixgbe_init_hw "
			    "returned %d", rv);
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Need to init eeprom before validating the checksum.
	 */
	if (ixgbe_init_eeprom_params(hw) < 0) {
		ixgbe_error(ixgbe,
		    "Unable to initialize the eeprom interface.");
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * NVM validation
	 */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again;
		 * if it fails a second time, it's a real issue.
		 */
		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
			ixgbe_error(ixgbe,
			    "Invalid NVM checksum. Please contact "
			    "the vendor to update the NVM.");
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Setup default flow control thresholds - enable/disable
	 * & flow control type is controlled by ixgbe.conf
	 */
	hw->fc.high_water[0] = DEFAULT_FCRTH;
	hw->fc.low_water[0] = DEFAULT_FCRTL;
	hw->fc.pause_time = DEFAULT_FCPAUSE;
	hw->fc.send_xon = B_TRUE;

	/*
	 * Initialize flow control
	 */
	(void) ixgbe_start_hw(hw);

	/*
	 * Initialize link settings
	 */
	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

	/*
	 * Initialize the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * Read identifying information and place in devinfo.
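	 * The PBA (Printed Board Assembly) number identifies the physical
	 * board; it is exported as the "printed-board-assembly" property
	 * for the benefit of diagnostic tooling.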
	 */
	pbanum[0] = '\0';
	(void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (*pbanum != '\0') {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
		    "printed-board-assembly", (char *)pbanum);
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto init_fail;
	}

	mutex_exit(&ixgbe->gen_lock);
	return (IXGBE_SUCCESS);

init_fail:
	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	mutex_exit(&ixgbe->gen_lock);
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	return (IXGBE_FAILURE);
}

/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Re-enable relaxed ordering for performance. It is disabled
	 * by default in the hardware init.
	 */
	if (ixgbe->relax_order_enable == B_TRUE)
		ixgbe_enable_relaxed_ordering(hw);

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
	}

	/*
	 * Disable Wake-on-LAN
	 */
	IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);

	/*
	 * Some adapters offer Energy Efficient Ethernet (EEE) support.
	 * Due to issues with EEE in e1000g/igb, we disable this by default
	 * as a precautionary measure.
	 *
	 * Currently, the only known adapter which supports EEE in the ixgbe
	 * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the
	 * first revision of it, as well as any X550 with MAC type 6 (non-EM).
	 */
	(void) ixgbe_setup_eee(hw, B_FALSE);

	/*
	 * Turn on any present SFP Tx laser
	 */
	ixgbe_enable_tx_laser(hw);

	/*
	 * Power on the PHY
	 */
	(void) ixgbe_set_phy_power(hw, B_TRUE);

	/*
	 * Save the state of the PHY
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int rv;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Stop interrupt generation and disable Tx unit
	 */
	hw->adapter_stopped = B_FALSE;
	(void) ixgbe_stop_adapter(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	/*
	 * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting
	 * the PHY while doing so. Else, just power down the PHY.
	 */
	if (hw->phy.ops.enter_lplu != NULL) {
		hw->phy.reset_disable = B_TRUE;
		rv = hw->phy.ops.enter_lplu(hw);
		if (rv != IXGBE_SUCCESS)
			ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
		hw->phy.reset_disable = B_FALSE;
	} else {
		(void) ixgbe_set_phy_power(hw, B_FALSE);
	}

	/*
	 * Turn off any present SFP Tx laser
	 * Expected for health and safety reasons
	 */
	ixgbe_disable_tx_laser(hw);

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);
}

/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	int i;

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

	ixgbe_stop(ixgbe, B_FALSE);

	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		mutex_exit(&ixgbe->gen_lock);
		return (IXGBE_FAILURE);
	}

	/*
	 * After resetting, need to recheck the link status.
	 */
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

	if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	ixgbe_enable_watchdog_timer(ixgbe);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
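 *
 * Called once the chipset has been stopped or the link is down, when the
 * packets still sitting on the work lists can no longer be transmitted:
 * every outstanding tx control block is freed back to the free list and
 * the descriptor ring head/tail indexes are reset to zero.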
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	int i, j;

	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chance to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * Reset the writeback head if it's enabled.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	boolean_t done;
	int i, j;

	/*
	 * Wait for a specific time to allow pending tx packets
	 * to be transmitted.
	 *
	 * Check the counter tbd_free to see if transmission is done.
	 * No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending packets have been transmitted;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < TX_DRAIN_TIME; i++) {

		done = B_TRUE;
		for (j = 0; j < ixgbe->num_tx_rings; j++) {
			tx_ring = &ixgbe->tx_rings[j];
			done = done &&
			    (tx_ring->tbd_free == tx_ring->ring_size);
		}

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
	boolean_t done = B_TRUE;
	int i;

	/*
	 * Poll the rx free list to check if those rx buffers held by
	 * the upper layer are released.
	 *
	 * Check the counter rcb_free to see if all pending buffers are
	 * released. No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending buffers have been released;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < RX_DRAIN_TIME; i++) {
		done = (ixgbe->rcb_pending == 0);

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_start - Start the driver/chipset.
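 *
 * When alloc_buffer is B_TRUE the software receive rings and the rx/tx
 * DMA buffers are allocated first; restart paths such as ixgbe_reset()
 * pass B_FALSE so the existing buffers are reused. Called with gen_lock
 * held.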
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	/*
	 * Configure link now for X550
	 *
	 * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
	 * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
	 * the resting state of the link would be the maximum speed that
	 * autonegotiation will allow (usually 10Gb, infrastructure allowing),
	 * so we never bothered with explicitly setting the link to 10Gb as it
	 * would already be at that state on driver attach. With X550, we must
	 * trigger a re-negotiation of the link in order to switch from a LPLU
	 * 1Gb link to 10Gb (cable and link partner permitting).
	 */
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		(void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
		ixgbe_get_hw_state(ixgbe);
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * ixgbe_start() will be called when resetting; however, if a reset
	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
	 * before enabling the interrupts.
	 */
	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
	    | IXGBE_STALL | IXGBE_OVERTEMP));

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_stop - Stop the driver/chipset.
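 *
 * When free_buffer is B_TRUE the rx/tx DMA resources are released as
 * well; the suspend and reset paths pass B_FALSE so the buffers survive
 * the stop/start cycle. Called with gen_lock held.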
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}

/*
 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
 */
/* ARGSUSED */
static int
ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
    void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;

	switch (cbaction) {
	/* IRM callback */
	int count;
	case DDI_CB_INTR_ADD:
	case DDI_CB_INTR_REMOVE:
		count = (int)(uintptr_t)cbarg;
		ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
		DTRACE_PROBE2(ixgbe__irm__callback, int, count,
		    int, ixgbe->intr_cnt);
		if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
		    DDI_SUCCESS) {
			ixgbe_error(ixgbe,
			    "IRM CB: Failed to adjust interrupts");
			goto cb_fail;
		}
		break;
	default:
		IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
		    cbaction);
		return (DDI_ENOTSUP);
	}
	return (DDI_SUCCESS);
cb_fail:
	return (DDI_FAILURE);
}

/*
 * ixgbe_intr_adjust - Adjust interrupts to respond to an IRM request.
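 *
 * The adjustment is performed with the interface quiesced: the device is
 * stopped, the interrupt handlers are removed, MSI-X vectors are allocated
 * or freed as requested, the rings are remapped onto the new vector set,
 * and the device is restarted.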
 */
static int
ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
{
	int i, rc, actual;

	if (count == 0)
		return (DDI_SUCCESS);

	if ((cbaction == DDI_CB_INTR_ADD &&
	    ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
	    (cbaction == DDI_CB_INTR_REMOVE &&
	    ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
		return (DDI_FAILURE);

	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		return (DDI_FAILURE);
	}

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);

	mutex_enter(&ixgbe->gen_lock);
	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
	ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);

	ixgbe_stop(ixgbe, B_FALSE);
	/*
	 * Disable interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		rc = ixgbe_disable_intrs(ixgbe);
		ASSERT(rc == IXGBE_SUCCESS);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Clear vect_map
	 */
	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
	switch (cbaction) {
	case DDI_CB_INTR_ADD:
		rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
		    DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
		    DDI_INTR_ALLOC_NORMAL);
		if (rc != DDI_SUCCESS || actual != count) {
			ixgbe_log(ixgbe, "Adjust interrupts failed. "
			    "return: %d, irm cb size: %d, actual: %d",
			    rc, count, actual);
			goto intr_adjust_fail;
		}
		ixgbe->intr_cnt += count;
		break;

	case DDI_CB_INTR_REMOVE:
		for (i = ixgbe->intr_cnt - count;
		    i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_free(ixgbe->htable[i]);
			ixgbe->htable[i] = NULL;
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe, "Adjust interrupts failed. "
2063 "return: %d, irm cb size: %d, actual: %d", 2064 rc, count, actual); 2065 goto intr_adjust_fail; 2066 } 2067 } 2068 ixgbe->intr_cnt -= count; 2069 break; 2070 } 2071 2072 /* 2073 * Get priority for first vector, assume remaining are all the same 2074 */ 2075 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 2076 if (rc != DDI_SUCCESS) { 2077 ixgbe_log(ixgbe, 2078 "Get interrupt priority failed: %d", rc); 2079 goto intr_adjust_fail; 2080 } 2081 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 2082 if (rc != DDI_SUCCESS) { 2083 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc); 2084 goto intr_adjust_fail; 2085 } 2086 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 2087 2088 /* 2089 * Map rings to interrupt vectors 2090 */ 2091 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 2092 ixgbe_error(ixgbe, 2093 "IRM CB: Failed to map interrupts to vectors"); 2094 goto intr_adjust_fail; 2095 } 2096 2097 /* 2098 * Add interrupt handlers 2099 */ 2100 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 2101 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers"); 2102 goto intr_adjust_fail; 2103 } 2104 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 2105 2106 /* 2107 * Now that mutex locks are initialized, and the chip is also 2108 * initialized, enable interrupts. 2109 */ 2110 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 2111 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts"); 2112 goto intr_adjust_fail; 2113 } 2114 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 2115 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 2116 ixgbe_error(ixgbe, "IRM CB: Failed to start"); 2117 goto intr_adjust_fail; 2118 } 2119 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST; 2120 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; 2121 ixgbe->ixgbe_state |= IXGBE_STARTED; 2122 mutex_exit(&ixgbe->gen_lock); 2123 2124 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2125 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, 2126 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]); 2127 } 2128 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2129 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, 2130 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]); 2131 } 2132 2133 /* Wakeup all Tx rings */ 2134 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2135 mac_tx_ring_update(ixgbe->mac_hdl, 2136 ixgbe->tx_rings[i].ring_handle); 2137 } 2138 2139 IXGBE_DEBUGLOG_3(ixgbe, 2140 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).", 2141 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max); 2142 return (DDI_SUCCESS); 2143 2144 intr_adjust_fail: 2145 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 2146 mutex_exit(&ixgbe->gen_lock); 2147 return (DDI_FAILURE); 2148 } 2149 2150 /* 2151 * ixgbe_intr_cb_register - Register interrupt callback function. 2152 */ 2153 static int 2154 ixgbe_intr_cb_register(ixgbe_t *ixgbe) 2155 { 2156 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc, 2157 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) { 2158 return (IXGBE_FAILURE); 2159 } 2160 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered."); 2161 return (IXGBE_SUCCESS); 2162 } 2163 2164 /* 2165 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings. 
2166 */ 2167 static int 2168 ixgbe_alloc_rings(ixgbe_t *ixgbe) 2169 { 2170 /* 2171 * Allocate memory space for rx rings 2172 */ 2173 ixgbe->rx_rings = kmem_zalloc( 2174 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, 2175 KM_NOSLEEP); 2176 2177 if (ixgbe->rx_rings == NULL) { 2178 return (IXGBE_FAILURE); 2179 } 2180 2181 /* 2182 * Allocate memory space for tx rings 2183 */ 2184 ixgbe->tx_rings = kmem_zalloc( 2185 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, 2186 KM_NOSLEEP); 2187 2188 if (ixgbe->tx_rings == NULL) { 2189 kmem_free(ixgbe->rx_rings, 2190 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2191 ixgbe->rx_rings = NULL; 2192 return (IXGBE_FAILURE); 2193 } 2194 2195 /* 2196 * Allocate memory space for rx ring groups 2197 */ 2198 ixgbe->rx_groups = kmem_zalloc( 2199 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, 2200 KM_NOSLEEP); 2201 2202 if (ixgbe->rx_groups == NULL) { 2203 kmem_free(ixgbe->rx_rings, 2204 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2205 kmem_free(ixgbe->tx_rings, 2206 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2207 ixgbe->rx_rings = NULL; 2208 ixgbe->tx_rings = NULL; 2209 return (IXGBE_FAILURE); 2210 } 2211 2212 return (IXGBE_SUCCESS); 2213 } 2214 2215 /* 2216 * ixgbe_free_rings - Free the memory space of rx/tx rings. 2217 */ 2218 static void 2219 ixgbe_free_rings(ixgbe_t *ixgbe) 2220 { 2221 if (ixgbe->rx_rings != NULL) { 2222 kmem_free(ixgbe->rx_rings, 2223 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2224 ixgbe->rx_rings = NULL; 2225 } 2226 2227 if (ixgbe->tx_rings != NULL) { 2228 kmem_free(ixgbe->tx_rings, 2229 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2230 ixgbe->tx_rings = NULL; 2231 } 2232 2233 if (ixgbe->rx_groups != NULL) { 2234 kmem_free(ixgbe->rx_groups, 2235 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); 2236 ixgbe->rx_groups = NULL; 2237 } 2238 } 2239 2240 static int 2241 ixgbe_alloc_rx_data(ixgbe_t *ixgbe) 2242 { 2243 ixgbe_rx_ring_t *rx_ring; 2244 int i; 2245 2246 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2247 rx_ring = &ixgbe->rx_rings[i]; 2248 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) 2249 goto alloc_rx_rings_failure; 2250 } 2251 return (IXGBE_SUCCESS); 2252 2253 alloc_rx_rings_failure: 2254 ixgbe_free_rx_data(ixgbe); 2255 return (IXGBE_FAILURE); 2256 } 2257 2258 static void 2259 ixgbe_free_rx_data(ixgbe_t *ixgbe) 2260 { 2261 ixgbe_rx_ring_t *rx_ring; 2262 ixgbe_rx_data_t *rx_data; 2263 int i; 2264 2265 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2266 rx_ring = &ixgbe->rx_rings[i]; 2267 2268 mutex_enter(&ixgbe->rx_pending_lock); 2269 rx_data = rx_ring->rx_data; 2270 2271 if (rx_data != NULL) { 2272 rx_data->flag |= IXGBE_RX_STOPPED; 2273 2274 if (rx_data->rcb_pending == 0) { 2275 ixgbe_free_rx_ring_data(rx_data); 2276 rx_ring->rx_data = NULL; 2277 } 2278 } 2279 2280 mutex_exit(&ixgbe->rx_pending_lock); 2281 } 2282 } 2283 2284 /* 2285 * ixgbe_setup_rings - Setup rx/tx rings. 2286 */ 2287 static void 2288 ixgbe_setup_rings(ixgbe_t *ixgbe) 2289 { 2290 /* 2291 * Setup the rx/tx rings, including the following: 2292 * 2293 * 1. Setup the descriptor ring and the control block buffers; 2294 * 2. Initialize necessary registers for receive/transmit; 2295 * 3. 
Initialize software pointers/parameters for receive/transmit; 2296 */ 2297 ixgbe_setup_rx(ixgbe); 2298 2299 ixgbe_setup_tx(ixgbe); 2300 } 2301 2302 static void 2303 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 2304 { 2305 ixgbe_t *ixgbe = rx_ring->ixgbe; 2306 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2307 struct ixgbe_hw *hw = &ixgbe->hw; 2308 rx_control_block_t *rcb; 2309 union ixgbe_adv_rx_desc *rbd; 2310 uint32_t size; 2311 uint32_t buf_low; 2312 uint32_t buf_high; 2313 uint32_t reg_val; 2314 int i; 2315 2316 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2317 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2318 2319 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2320 rcb = rx_data->work_list[i]; 2321 rbd = &rx_data->rbd_ring[i]; 2322 2323 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2324 rbd->read.hdr_addr = NULL; 2325 } 2326 2327 /* 2328 * Initialize the length register 2329 */ 2330 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2331 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2332 2333 /* 2334 * Initialize the base address registers 2335 */ 2336 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2337 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2338 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2339 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2340 2341 /* 2342 * Setup head & tail pointers 2343 */ 2344 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2345 rx_data->ring_size - 1); 2346 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2347 2348 rx_data->rbd_next = 0; 2349 rx_data->lro_first = 0; 2350 2351 /* 2352 * Setup the Receive Descriptor Control Register (RXDCTL) 2353 * PTHRESH=32 descriptors (half the internal cache) 2354 * HTHRESH=0 descriptors (to minimize latency on fetch) 2355 * WTHRESH defaults to 1 (writeback each descriptor) 2356 */ 2357 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2358 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2359 2360 /* Not a valid value for 82599, X540 or X550 */ 2361 if (hw->mac.type == ixgbe_mac_82598EB) { 2362 reg_val |= 0x0020; /* pthresh */ 2363 } 2364 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2365 2366 if (hw->mac.type == ixgbe_mac_82599EB || 2367 hw->mac.type == ixgbe_mac_X540 || 2368 hw->mac.type == ixgbe_mac_X550 || 2369 hw->mac.type == ixgbe_mac_X550EM_x) { 2370 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2371 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2372 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2373 } 2374 2375 /* 2376 * Setup the Split and Replication Receive Control Register. 2377 * Set the rx buffer size and the advanced descriptor type. 
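 *
 * BSIZEPKT is encoded in 1 KB granularity. For example, with a
 * (hypothetical) 2 KB receive buffer:
 *
 *	2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT == 2	(2 KB packet buffer)
 *
 * DROP_EN allows the hardware to drop packets destined for this queue
 * when no receive descriptors are available, instead of backing up and
 * blocking traffic on the other queues.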
2378 */
2379 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2380 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2381 	reg_val |= IXGBE_SRRCTL_DROP_EN;
2382 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2383 }
2384
2385 static void
2386 ixgbe_setup_rx(ixgbe_t *ixgbe)
2387 {
2388 	ixgbe_rx_ring_t *rx_ring;
2389 	struct ixgbe_hw *hw = &ixgbe->hw;
2390 	uint32_t reg_val;
2391 	uint32_t ring_mapping;
2392 	uint32_t i, index;
2393 	uint32_t psrtype_rss_bit;
2394
2395 	/*
2396 	 * Ensure that Rx is disabled while setting up
2397 	 * the Rx unit and Rx descriptor ring(s)
2398 	 */
2399 	ixgbe_disable_rx(hw);
2400
2401 	/* PSRTYPE must be configured for 82599 */
2402 	if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2403 	    ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2404 		reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2405 		    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2406 		reg_val |= IXGBE_PSRTYPE_L2HDR;
2407 		reg_val |= 0x80000000;	/* RQPL: 16 RSS queues */
2408 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2409 	} else {
2410 		if (ixgbe->num_rx_groups > 32) {
2411 			psrtype_rss_bit = 0x20000000; /* RQPL: 2 per pool */
2412 		} else {
2413 			psrtype_rss_bit = 0x40000000; /* RQPL: 4 per pool */
2414 		}
2415 		for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2416 			reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2417 			    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2418 			reg_val |= IXGBE_PSRTYPE_L2HDR;
2419 			reg_val |= psrtype_rss_bit;
2420 			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2421 		}
2422 	}
2423
2424 	/*
2425 	 * Set filter control in FCTRL to determine which types of packets
2426 	 * are passed up to the driver.
2427 	 * - Pass broadcast packets.
2428 	 * - Do not pass flow control pause frames (82598-specific)
2429 	 */
2430 	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2431 	reg_val |= IXGBE_FCTRL_BAM;	/* Broadcast Accept Mode */
2432 	if (hw->mac.type == ixgbe_mac_82598EB) {
2433 		reg_val |= IXGBE_FCTRL_DPF;	/* Discard Pause Frames */
2434 	}
2435 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2436
2437 	/*
2438 	 * Hardware checksum settings
2439 	 */
2440 	if (ixgbe->rx_hcksum_enable) {
2441 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2442 		reg_val |= IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
2443 		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2444 	}
2445
2446 	/*
2447 	 * Setup VMDq and RSS for multiple receive queues
2448 	 */
2449 	switch (ixgbe->classify_mode) {
2450 	case IXGBE_CLASSIFY_RSS:
2451 		/*
2452 		 * One group, only RSS is needed when more than
2453 		 * one ring enabled.
2454 		 */
2455 		ixgbe_setup_rss(ixgbe);
2456 		break;
2457
2458 	case IXGBE_CLASSIFY_VMDQ:
2459 		/*
2460 		 * Multiple groups, each group has one ring,
2461 		 * only VMDq is needed.
2462 		 */
2463 		ixgbe_setup_vmdq(ixgbe);
2464 		break;
2465
2466 	case IXGBE_CLASSIFY_VMDQ_RSS:
2467 		/*
2468 		 * Multiple groups and multiple rings, both
2469 		 * VMDq and RSS are needed.
2470 		 */
2471 		ixgbe_setup_vmdq_rss(ixgbe);
2472 		break;
2473
2474 	default:
2475 		break;
2476 	}
2477
2478 	/*
2479 	 * Enable the receive unit. This must be done after filter
2480 	 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2481 	 * 82598 is the only adapter which defines this RXCTRL option.
2482 */
2483 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2484 	if (hw->mac.type == ixgbe_mac_82598EB)
2485 		reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2486 	reg_val |= IXGBE_RXCTRL_RXEN;
2487 	(void) ixgbe_enable_rx_dma(hw, reg_val);
2488
2489 	/*
2490 	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2491 	 */
2492 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
2493 		rx_ring = &ixgbe->rx_rings[i];
2494 		ixgbe_setup_rx_ring(rx_ring);
2495 	}
2496
2497 	/*
2498 	 * Setup the per-ring statistics mapping.
2499 	 */
2500 	ring_mapping = 0;
2501 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
2502 		index = ixgbe->rx_rings[i].hw_index;
2503 		ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2504 		ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2505 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2506 	}
2507
2508 	/*
2509 	 * The Max Frame Size in MHADD/MAXFRS is internally increased by four
2510 	 * bytes when the packet has a VLAN field, so the value programmed
2511 	 * here covers only the MTU, ethernet header and frame check sequence.
2512 	 * The register is named MAXFRS on the 82599.
2513 	 */
2514 	reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
2515 	reg_val &= ~IXGBE_MHADD_MFS_MASK;
2516 	reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
2517 	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2518 	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2519
2520 	/*
2521 	 * Setup Jumbo Frame enable bit
2522 	 */
2523 	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2524 	if (ixgbe->default_mtu > ETHERMTU)
2525 		reg_val |= IXGBE_HLREG0_JUMBOEN;
2526 	else
2527 		reg_val &= ~IXGBE_HLREG0_JUMBOEN;
2528 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2529
2530 	/*
2531 	 * Setup RSC for multiple receive queues.
2532 	 */
2533 	if (ixgbe->lro_enable) {
2534 		for (i = 0; i < ixgbe->num_rx_rings; i++) {
2535 			/*
2536 			 * Make sure rx_buf_size * MAXDESC does not
2537 			 * exceed 65535.
2538 			 * Intel recommends 4 for the MAXDESC field value.
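			 *
			 * For example, a 16 KB buffer gives 16384 * 4 = 65536,
			 * which would overflow the 16-bit limit, so MAXDESC is
			 * dropped to 1 in that case; smaller buffers can
			 * safely use the recommended value of 4.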
2539 */ 2540 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i)); 2541 reg_val |= IXGBE_RSCCTL_RSCEN; 2542 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k) 2543 reg_val |= IXGBE_RSCCTL_MAXDESC_1; 2544 else 2545 reg_val |= IXGBE_RSCCTL_MAXDESC_4; 2546 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val); 2547 } 2548 2549 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU); 2550 reg_val |= IXGBE_RSCDBU_RSCACKDIS; 2551 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val); 2552 2553 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2554 reg_val |= IXGBE_RDRXCTL_RSCACKC; 2555 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX; 2556 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2557 2558 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2559 } 2560 } 2561 2562 static void 2563 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring) 2564 { 2565 ixgbe_t *ixgbe = tx_ring->ixgbe; 2566 struct ixgbe_hw *hw = &ixgbe->hw; 2567 uint32_t size; 2568 uint32_t buf_low; 2569 uint32_t buf_high; 2570 uint32_t reg_val; 2571 2572 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2573 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2574 2575 /* 2576 * Initialize the length register 2577 */ 2578 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc); 2579 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size); 2580 2581 /* 2582 * Initialize the base address registers 2583 */ 2584 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2585 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2586 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low); 2587 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high); 2588 2589 /* 2590 * Setup head & tail pointers 2591 */ 2592 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0); 2593 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0); 2594 2595 /* 2596 * Setup head write-back 2597 */ 2598 if (ixgbe->tx_head_wb_enable) { 2599 /* 2600 * The memory of the head write-back is allocated using 2601 * the extra tbd beyond the tail of the tbd ring. 2602 */ 2603 tx_ring->tbd_head_wb = (uint32_t *) 2604 ((uintptr_t)tx_ring->tbd_area.address + size); 2605 *tx_ring->tbd_head_wb = 0; 2606 2607 buf_low = (uint32_t) 2608 (tx_ring->tbd_area.dma_address + size); 2609 buf_high = (uint32_t) 2610 ((tx_ring->tbd_area.dma_address + size) >> 32); 2611 2612 /* Set the head write-back enable bit */ 2613 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2614 2615 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2616 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2617 2618 /* 2619 * Turn off relaxed ordering for head write back or it will 2620 * cause problems with the tx recycling 2621 */ 2622 2623 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ? 
2624 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) : 2625 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index)); 2626 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 2627 if (hw->mac.type == ixgbe_mac_82598EB) { 2628 IXGBE_WRITE_REG(hw, 2629 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val); 2630 } else { 2631 IXGBE_WRITE_REG(hw, 2632 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val); 2633 } 2634 } else { 2635 tx_ring->tbd_head_wb = NULL; 2636 } 2637 2638 tx_ring->tbd_head = 0; 2639 tx_ring->tbd_tail = 0; 2640 tx_ring->tbd_free = tx_ring->ring_size; 2641 2642 if (ixgbe->tx_ring_init == B_TRUE) { 2643 tx_ring->tcb_head = 0; 2644 tx_ring->tcb_tail = 0; 2645 tx_ring->tcb_free = tx_ring->free_list_size; 2646 } 2647 2648 /* 2649 * Initialize the s/w context structure 2650 */ 2651 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t)); 2652 } 2653 2654 static void 2655 ixgbe_setup_tx(ixgbe_t *ixgbe) 2656 { 2657 struct ixgbe_hw *hw = &ixgbe->hw; 2658 ixgbe_tx_ring_t *tx_ring; 2659 uint32_t reg_val; 2660 uint32_t ring_mapping; 2661 int i; 2662 2663 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2664 tx_ring = &ixgbe->tx_rings[i]; 2665 ixgbe_setup_tx_ring(tx_ring); 2666 } 2667 2668 /* 2669 * Setup the per-ring statistics mapping. 2670 */ 2671 ring_mapping = 0; 2672 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2673 ring_mapping |= (i & 0xF) << (8 * (i & 0x3)); 2674 if ((i & 0x3) == 0x3) { 2675 switch (hw->mac.type) { 2676 case ixgbe_mac_82598EB: 2677 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 2678 ring_mapping); 2679 break; 2680 2681 case ixgbe_mac_82599EB: 2682 case ixgbe_mac_X540: 2683 case ixgbe_mac_X550: 2684 case ixgbe_mac_X550EM_x: 2685 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 2686 ring_mapping); 2687 break; 2688 2689 default: 2690 break; 2691 } 2692 2693 ring_mapping = 0; 2694 } 2695 } 2696 if (i & 0x3) { 2697 switch (hw->mac.type) { 2698 case ixgbe_mac_82598EB: 2699 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping); 2700 break; 2701 2702 case ixgbe_mac_82599EB: 2703 case ixgbe_mac_X540: 2704 case ixgbe_mac_X550: 2705 case ixgbe_mac_X550EM_x: 2706 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping); 2707 break; 2708 2709 default: 2710 break; 2711 } 2712 } 2713 2714 /* 2715 * Enable CRC appending and TX padding (for short tx frames) 2716 */ 2717 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2718 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN; 2719 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2720 2721 /* 2722 * enable DMA for 82599, X540 and X550 parts 2723 */ 2724 if (hw->mac.type == ixgbe_mac_82599EB || 2725 hw->mac.type == ixgbe_mac_X540 || 2726 hw->mac.type == ixgbe_mac_X550 || 2727 hw->mac.type == ixgbe_mac_X550EM_x) { 2728 /* DMATXCTL.TE must be set after all Tx config is complete */ 2729 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2730 reg_val |= IXGBE_DMATXCTL_TE; 2731 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val); 2732 2733 /* Disable arbiter to set MTQC */ 2734 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2735 reg_val |= IXGBE_RTTDCS_ARBDIS; 2736 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2737 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2738 reg_val &= ~IXGBE_RTTDCS_ARBDIS; 2739 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2740 } 2741 2742 /* 2743 * Enabling tx queues .. 
2744  * For the 82599, this must be done after DMATXCTL.TE is set
2745  */
2746 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2747 		tx_ring = &ixgbe->tx_rings[i];
2748 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2749 		reg_val |= IXGBE_TXDCTL_ENABLE;
2750 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2751 	}
2752 }
2753
2754 /*
2755  * ixgbe_setup_rss - Setup receive-side scaling feature.
2756  */
2757 static void
2758 ixgbe_setup_rss(ixgbe_t *ixgbe)
2759 {
2760 	struct ixgbe_hw *hw = &ixgbe->hw;
2761 	uint32_t mrqc;
2762
2763 	/*
2764 	 * Initialize RETA/ERETA table
2765 	 */
2766 	ixgbe_setup_rss_table(ixgbe);
2767
2768 	/*
2769 	 * Enable RSS & perform hash on these packet types
2770 	 */
2771 	mrqc = IXGBE_MRQC_RSSEN |
2772 	    IXGBE_MRQC_RSS_FIELD_IPV4 |
2773 	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2774 	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2775 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2776 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2777 	    IXGBE_MRQC_RSS_FIELD_IPV6 |
2778 	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2779 	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2780 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2781 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2782 }
2783
2784 /*
2785  * ixgbe_setup_vmdq - Setup MAC classification feature
2786  */
2787 static void
2788 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2789 {
2790 	struct ixgbe_hw *hw = &ixgbe->hw;
2791 	uint32_t vmdctl, i, vtctl;
2792
2793 	/*
2794 	 * Setup the VMDq Control register, enable VMDq based on
2795 	 * packet destination MAC address:
2796 	 */
2797 	switch (hw->mac.type) {
2798 	case ixgbe_mac_82598EB:
2799 		/*
2800 		 * VMDq Enable = 1;
2801 		 * VMDq Filter = 0; MAC filtering
2802 		 * Default VMDq output index = 0;
2803 		 */
2804 		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2805 		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2806 		break;
2807
2808 	case ixgbe_mac_82599EB:
2809 	case ixgbe_mac_X540:
2810 	case ixgbe_mac_X550:
2811 	case ixgbe_mac_X550EM_x:
2812 		/*
2813 		 * Enable VMDq-only.
2814 		 */
2815 		vmdctl = IXGBE_MRQC_VMDQEN;
2816 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2817
2818 		for (i = 0; i < hw->mac.num_rar_entries; i++) {
2819 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2820 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2821 		}
2822
2823 		/*
2824 		 * Enable Virtualization and Replication.
2825 		 */
2826 		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2827 		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2828
2829 		/*
2830 		 * Enable receiving packets to all VFs
2831 		 */
2832 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2833 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2834 		break;
2835
2836 	default:
2837 		break;
2838 	}
2839 }
2840
2841 /*
2842  * ixgbe_setup_vmdq_rss - Set up both the VMDq and RSS features.
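 *
 * As an illustrative example: with 32 or fewer groups the VMDQRSS32EN
 * encoding is used, allowing up to 4 RSS rings per pool (e.g. 32
 * groups x 4 rings), while more than 32 groups selects VMDQRSS64EN
 * with at most 2 RSS rings per pool. The MRQC encoding chosen below
 * depends solely on num_rx_groups.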
2843 */
2844 static void
2845 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2846 {
2847 	struct ixgbe_hw *hw = &ixgbe->hw;
2848 	uint32_t i, mrqc;
2849 	uint32_t vtctl, vmdctl;
2850
2851 	/*
2852 	 * Initialize RETA/ERETA table
2853 	 */
2854 	ixgbe_setup_rss_table(ixgbe);
2855
2856 	/*
2857 	 * Enable and setup RSS and VMDq
2858 	 */
2859 	switch (hw->mac.type) {
2860 	case ixgbe_mac_82598EB:
2861 		/*
2862 		 * Enable RSS & Setup RSS Hash functions
2863 		 */
2864 		mrqc = IXGBE_MRQC_RSSEN |
2865 		    IXGBE_MRQC_RSS_FIELD_IPV4 |
2866 		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2867 		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2868 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2869 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2870 		    IXGBE_MRQC_RSS_FIELD_IPV6 |
2871 		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2872 		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2873 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2874 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2875
2876 		/*
2877 		 * Enable and Setup VMDq
2878 		 * VMDq Filter = 0; MAC filtering
2879 		 * Default VMDq output index = 0;
2880 		 */
2881 		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2882 		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2883 		break;
2884
2885 	case ixgbe_mac_82599EB:
2886 	case ixgbe_mac_X540:
2887 	case ixgbe_mac_X550:
2888 	case ixgbe_mac_X550EM_x:
2889 		/*
2890 		 * Enable RSS & Setup RSS Hash functions
2891 		 */
2892 		mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2893 		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2894 		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2895 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2896 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2897 		    IXGBE_MRQC_RSS_FIELD_IPV6 |
2898 		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2899 		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2900 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2901
2902 		/*
2903 		 * Enable VMDq+RSS.
2904 		 */
2905 		if (ixgbe->num_rx_groups > 32) {
2906 			mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2907 		} else {
2908 			mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2909 		}
2910
2911 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2912
2913 		for (i = 0; i < hw->mac.num_rar_entries; i++) {
2914 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2915 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2916 		}
2917 		break;
2918
2919 	default:
2920 		break;
2921 	}
2922
2923
2924 	if (hw->mac.type == ixgbe_mac_82599EB ||
2925 	    hw->mac.type == ixgbe_mac_X540 ||
2926 	    hw->mac.type == ixgbe_mac_X550 ||
2927 	    hw->mac.type == ixgbe_mac_X550EM_x) {
2928 		/*
2929 		 * Enable Virtualization and Replication.
2930 		 */
2931 		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2932 		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2933
2934 		/*
2935 		 * Enable receiving packets to all VFs
2936 		 */
2937 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2938 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2939 	}
2940 }
2941
2942 /*
2943  * ixgbe_setup_rss_table - Setup RSS table
2944  */
2945 static void
2946 ixgbe_setup_rss_table(ixgbe_t *ixgbe)
2947 {
2948 	struct ixgbe_hw *hw = &ixgbe->hw;
2949 	uint32_t i, j;
2950 	uint32_t random;
2951 	uint32_t reta;
2952 	uint32_t ring_per_group;
2953 	uint32_t ring;
2954 	uint32_t table_size;
2955 	uint32_t index_mult;
2956 	uint32_t rxcsum;
2957
2958 	/*
2959 	 * Set multiplier for RETA setup and table size based on MAC type.
2960 	 * RETA table sizes vary by model:
2961 	 *
2962 	 * 82598, 82599, X540: 128 table entries.
2963 	 * X550: 512 table entries.
2964 	 */
2965 	index_mult = 0x1;
2966 	table_size = 128;
2967 	switch (ixgbe->hw.mac.type) {
2968 	case ixgbe_mac_82598EB:
2969 		index_mult = 0x11;
2970 		break;
2971 	case ixgbe_mac_X550:
2972 	case ixgbe_mac_X550EM_x:
2973 		table_size = 512;
2974 		break;
2975 	default:
2976 		break;
2977 	}
2978
2979 	/*
2980 	 * Fill out RSS redirection table. The configuration of the indices is
2981 	 * hardware-dependent.
2982 	 *
2983 	 * 82598: 8 bits wide containing two 4 bit RSS indices
2984 	 * 82599, X540: 8 bits wide containing one 4 bit RSS index
2985 	 * X550: 8 bits wide containing one 6 bit RSS index
2986 	 */
2987 	reta = 0;
2988 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2989
2990 	for (i = 0, j = 0; i < table_size; i++, j++) {
2991 		if (j == ring_per_group)
2992 			j = 0;
2993 		/*
2994 		 * The low 8 bits are for hash value (n+0);
2995 		 * The next 8 bits are for hash value (n+1), etc.
2996 		 */
2997 		ring = (j * index_mult);
2998 		reta = reta >> 8;
2999 		reta = reta | (((uint32_t)ring) << 24);
3000
3001 		if ((i & 3) == 3) {
3002 			/*
3003 			 * The first 128 table entries are programmed into the
3004 			 * RETA register, with any beyond that (e.g. on X550)
3005 			 * into ERETA.
3006 			 */
3007 			if (i < 128)
3008 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3009 			else
3010 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3011 				    reta);
3012 			reta = 0;
3013 		}
3014 	}
3015
3016 	/*
3017 	 * Fill out hash function seeds with random values
3018 	 */
3019 	for (i = 0; i < 10; i++) {
3020 		(void) random_get_pseudo_bytes((uint8_t *)&random,
3021 		    sizeof (uint32_t));
3022 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
3023 	}
3024
3025 	/*
3026 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
3027 	 * It is an adapter hardware limitation that Packet Checksum is
3028 	 * mutually exclusive with RSS.
3029 	 */
3030 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3031 	rxcsum |= IXGBE_RXCSUM_PCSD;
3032 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
3033 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3034 }
3035
3036 /*
3037  * ixgbe_init_unicst - Initialize the unicast addresses.
3038  */
3039 static void
3040 ixgbe_init_unicst(ixgbe_t *ixgbe)
3041 {
3042 	struct ixgbe_hw *hw = &ixgbe->hw;
3043 	uint8_t *mac_addr;
3044 	int slot;
3045 	/*
3046 	 * Here we should consider two situations:
3047 	 *
3048 	 * 1. Chipset is initialized for the first time:
3049 	 *    Clear all the multiple unicast addresses.
3050 	 *
3051 	 * 2. Chipset is reset:
3052 	 *    Recover the multiple unicast addresses from the
3053 	 *    software data structure to the RAR registers.
3054 */
3055 	if (!ixgbe->unicst_init) {
3056 		/*
3057 		 * Initialize the multiple unicast addresses
3058 		 */
3059 		ixgbe->unicst_total = hw->mac.num_rar_entries;
3060 		ixgbe->unicst_avail = ixgbe->unicst_total;
3061 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3062 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3063 			bzero(mac_addr, ETHERADDRL);
3064 			(void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
3065 			ixgbe->unicst_addr[slot].mac.set = 0;
3066 		}
3067 		ixgbe->unicst_init = B_TRUE;
3068 	} else {
3069 		/* Re-configure the RAR registers */
3070 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3071 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3072 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
3073 				(void) ixgbe_set_rar(hw, slot, mac_addr,
3074 				    ixgbe->unicst_addr[slot].mac.group_index,
3075 				    IXGBE_RAH_AV);
3076 			} else {
3077 				bzero(mac_addr, ETHERADDRL);
3078 				(void) ixgbe_set_rar(hw, slot, mac_addr,
3079 				    NULL, NULL);
3080 			}
3081 		}
3082 	}
3083 }
3084
3085 /*
3086  * ixgbe_unicst_find - Find the slot for the specified unicast address
3087  */
3088 int
3089 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
3090 {
3091 	int slot;
3092
3093 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3094
3095 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3096 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
3097 		    mac_addr, ETHERADDRL) == 0)
3098 			return (slot);
3099 	}
3100
3101 	return (-1);
3102 }
3103
3104 /*
3105  * ixgbe_multicst_add - Add a multicast address.
3106  */
3107 int
3108 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3109 {
3110 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3111
3112 	if ((multiaddr[0] & 01) == 0) {
3113 		return (EINVAL);
3114 	}
3115
3116 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
3117 		return (ENOENT);
3118 	}
3119
3120 	bcopy(multiaddr,
3121 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
3122 	ixgbe->mcast_count++;
3123
3124 	/*
3125 	 * Update the multicast table in the hardware
3126 	 */
3127 	ixgbe_setup_multicst(ixgbe);
3128
3129 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3130 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3131 		return (EIO);
3132 	}
3133
3134 	return (0);
3135 }
3136
3137 /*
3138  * ixgbe_multicst_remove - Remove a multicast address.
3139  */
3140 int
3141 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3142 {
3143 	int i;
3144
3145 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3146
3147 	for (i = 0; i < ixgbe->mcast_count; i++) {
3148 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
3149 		    ETHERADDRL) == 0) {
3150 			for (i++; i < ixgbe->mcast_count; i++) {
3151 				ixgbe->mcast_table[i - 1] =
3152 				    ixgbe->mcast_table[i];
3153 			}
3154 			ixgbe->mcast_count--;
3155 			break;
3156 		}
3157 	}
3158
3159 	/*
3160 	 * Update the multicast table in the hardware
3161 	 */
3162 	ixgbe_setup_multicst(ixgbe);
3163
3164 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3165 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3166 		return (EIO);
3167 	}
3168
3169 	return (0);
3170 }
3171
3172 /*
3173  * ixgbe_setup_multicst - Setup multicast data structures.
3174  *
3175  * This routine initializes all of the multicast-related structures
3176  * and saves them in the hardware registers.
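 *
 * A sketch of the expected calling pattern (gen_lock must be held, as
 * the ASSERT below enforces):
 *
 *	mutex_enter(&ixgbe->gen_lock);
 *	err = ixgbe_multicst_add(ixgbe, multiaddr);
 *	mutex_exit(&ixgbe->gen_lock);
 *
 * ixgbe_multicst_add()/_remove() update the software mcast_table and
 * then call this routine to rewrite the MTA registers from it.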
3177 */
3178 static void
3179 ixgbe_setup_multicst(ixgbe_t *ixgbe)
3180 {
3181 	uint8_t *mc_addr_list;
3182 	uint32_t mc_addr_count;
3183 	struct ixgbe_hw *hw = &ixgbe->hw;
3184
3185 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3186
3187 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
3188
3189 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
3190 	mc_addr_count = ixgbe->mcast_count;
3191
3192 	/*
3193 	 * Update the multicast addresses to the MTA registers
3194 	 */
3195 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
3196 	    ixgbe_mc_table_itr, TRUE);
3197 }
3198
3199 /*
3200  * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
3201  *
3202  * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
3203  * Different chipsets may have different allowed configuration of vmdq and rss.
3204  */
3205 static void
3206 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
3207 {
3208 	struct ixgbe_hw *hw = &ixgbe->hw;
3209 	uint32_t ring_per_group;
3210
3211 	switch (hw->mac.type) {
3212 	case ixgbe_mac_82598EB:
3213 		/*
3214 		 * 82598 supports the following combination:
3215 		 * vmdq no. x rss no.
3216 		 * [5..16]  x 1
3217 		 * [1..4]   x [1..16]
3218 		 * However, 8 RSS queues per pool (vmdq) are sufficient for
3219 		 * most cases.
3220 		 */
3221 		ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3222 		if (ixgbe->num_rx_groups > 4) {
3223 			ixgbe->num_rx_rings = ixgbe->num_rx_groups;
3224 		} else {
3225 			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3226 			    min(8, ring_per_group);
3227 		}
3228
3229 		break;
3230
3231 	case ixgbe_mac_82599EB:
3232 	case ixgbe_mac_X540:
3233 	case ixgbe_mac_X550:
3234 	case ixgbe_mac_X550EM_x:
3235 		/*
3236 		 * 82599 supports the following combination:
3237 		 * vmdq no. x rss no.
3238 		 * [33..64] x [1..2]
3239 		 * [2..32]  x [1..4]
3240 		 * 1        x [1..16]
3241 		 * However, 8 RSS queues per pool (vmdq) are sufficient for
3242 		 * most cases.
3243 		 *
3244 		 * For now, treat X540 and X550 like the 82599.
3245 		 */
3246 		ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3247 		if (ixgbe->num_rx_groups == 1) {
3248 			ixgbe->num_rx_rings = min(8, ring_per_group);
3249 		} else if (ixgbe->num_rx_groups <= 32) {
3250 			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3251 			    min(4, ring_per_group);
3252 		} else if (ixgbe->num_rx_groups <= 64) {
3253 			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3254 			    min(2, ring_per_group);
3255 		}
3256 		break;
3257
3258 	default:
3259 		break;
3260 	}
3261
3262 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3263
3264 	if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3265 		ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3266 	} else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
3267 		ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
3268 	} else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
3269 		ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
3270 	} else {
3271 		ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
3272 	}
3273
3274 	IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
3275 	    ixgbe->num_rx_groups, ixgbe->num_rx_rings);
3276 }
3277
3278 /*
3279  * ixgbe_get_conf - Get driver configurations set in driver.conf.
3280  *
3281  * This routine gets user-configured values out of the configuration
3282  * file ixgbe.conf.
3283  *
3284  * For each configurable value, there is a minimum, a maximum, and a
3285  * default.
3286  * If user does not configure a value, use the default.
3287  * If user configures below the minimum, use the minimum.
3288  * If user configures above the maximum, use the maximum.
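 *
 * For example, a driver.conf fragment might look like the following
 * (the values shown here are purely illustrative):
 *
 *	default_mtu = 1500;
 *	flow_control = 3;
 *	tx_queue_number = 8;
 *	rx_queue_number = 8;
 *
 * Out-of-range values are clamped by ixgbe_get_prop() rather than
 * rejected.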
3289 */
3290 static void
3291 ixgbe_get_conf(ixgbe_t *ixgbe)
3292 {
3293 	struct ixgbe_hw *hw = &ixgbe->hw;
3294 	uint32_t flow_control;
3295
3296 	/*
3297 	 * ixgbe driver supports the following user configurations:
3298 	 *
3299 	 * Jumbo frame configuration:
3300 	 *    default_mtu
3301 	 *
3302 	 * Ethernet flow control configuration:
3303 	 *    flow_control
3304 	 *
3305 	 * Multiple rings configurations:
3306 	 *    tx_queue_number
3307 	 *    tx_ring_size
3308 	 *    rx_queue_number
3309 	 *    rx_ring_size
3310 	 *
3311 	 * Call ixgbe_get_prop() to get the value for a specific
3312 	 * configuration parameter.
3313 	 */
3314
3315 	/*
3316 	 * Jumbo frame configuration - max_frame_size controls host buffer
3317 	 * allocation, so it includes the MTU, ethernet header, VLAN tag and
3318 	 * frame check sequence.
3319 	 */
3320 	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3321 	    MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3322
3323 	ixgbe->max_frame_size = ixgbe->default_mtu +
3324 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
3325
3326 	/*
3327 	 * Ethernet flow control configuration
3328 	 */
3329 	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3330 	    ixgbe_fc_none, 3, ixgbe_fc_none);
3331 	if (flow_control == 3)
3332 		flow_control = ixgbe_fc_default;
3333
3334 	/*
3335 	 * fc.requested mode is what the user requests. After autoneg,
3336 	 * fc.current_mode will be the flow_control mode that was negotiated.
3337 	 */
3338 	hw->fc.requested_mode = flow_control;
3339
3340 	/*
3341 	 * Multiple rings configurations
3342 	 */
3343 	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3344 	    ixgbe->capab->min_tx_que_num,
3345 	    ixgbe->capab->max_tx_que_num,
3346 	    ixgbe->capab->def_tx_que_num);
3347 	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3348 	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3349
3350 	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3351 	    ixgbe->capab->min_rx_que_num,
3352 	    ixgbe->capab->max_rx_que_num,
3353 	    ixgbe->capab->def_rx_que_num);
3354 	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3355 	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3356
3357 	/*
3358 	 * Multiple groups configuration
3359 	 */
3360 	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3361 	    ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3362 	    ixgbe->capab->def_rx_grp_num);
3363
3364 	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3365 	    0, 1, DEFAULT_MR_ENABLE);
3366
3367 	if (ixgbe->mr_enable == B_FALSE) {
3368 		ixgbe->num_tx_rings = 1;
3369 		ixgbe->num_rx_rings = 1;
3370 		ixgbe->num_rx_groups = 1;
3371 		ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3372 	} else {
3373 		ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3374 		    max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3375 		/*
3376 		 * The combination of num_rx_rings and num_rx_groups
3377 		 * may not be supported by the h/w; adjust them to
3378 		 * appropriate values if necessary.
3379 		 */
3380 		ixgbe_setup_vmdq_rss_conf(ixgbe);
3381 	}
3382
3383 	/*
3384 	 * Tunable used to force an interrupt type. The only use is
3385 	 * for testing of the lesser interrupt types.
3386 	 * 0 = don't force interrupt type
3387 	 * 1 = force interrupt type MSI-X
3388 	 * 2 = force interrupt type MSI
3389 	 * 3 = force interrupt type Legacy
3390 	 */
3391 	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3392 	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3393
3394 	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3395 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3396 	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3397 	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3398 	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3399 	    0, 1, DEFAULT_LSO_ENABLE);
3400 	ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3401 	    0, 1, DEFAULT_LRO_ENABLE);
3402 	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3403 	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3404 	ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3405 	    PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3406
3407 	/* Head Write Back not recommended for 82599, X540 and X550 */
3408 	if (hw->mac.type == ixgbe_mac_82599EB ||
3409 	    hw->mac.type == ixgbe_mac_X540 ||
3410 	    hw->mac.type == ixgbe_mac_X550 ||
3411 	    hw->mac.type == ixgbe_mac_X550EM_x) {
3412 		ixgbe->tx_head_wb_enable = B_FALSE;
3413 	}
3414
3415 	/*
3416 	 * ixgbe LSO needs the tx h/w checksum support.
3417 	 * LSO will be disabled if tx h/w checksum is not
3418 	 * enabled.
3419 	 */
3420 	if (ixgbe->tx_hcksum_enable == B_FALSE) {
3421 		ixgbe->lso_enable = B_FALSE;
3422 	}
3423
3424 	/*
3425 	 * ixgbe LRO needs the rx h/w checksum support.
3426 	 * LRO will be disabled if rx h/w checksum is not
3427 	 * enabled.
3428 	 */
3429 	if (ixgbe->rx_hcksum_enable == B_FALSE) {
3430 		ixgbe->lro_enable = B_FALSE;
3431 	}
3432
3433 	/*
3434 	 * ixgbe LRO only supported by 82599, X540 and X550
3435 	 */
3436 	if (hw->mac.type == ixgbe_mac_82598EB) {
3437 		ixgbe->lro_enable = B_FALSE;
3438 	}
3439 	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3440 	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3441 	    DEFAULT_TX_COPY_THRESHOLD);
3442 	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3443 	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3444 	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3445 	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3446 	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3447 	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3448 	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3449 	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3450 	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3451
3452 	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3453 	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3454 	    DEFAULT_RX_COPY_THRESHOLD);
3455 	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3456 	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3457 	    DEFAULT_RX_LIMIT_PER_INTR);
3458
3459 	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3460 	    ixgbe->capab->min_intr_throttle,
3461 	    ixgbe->capab->max_intr_throttle,
3462 	    ixgbe->capab->def_intr_throttle);
3463 	/*
3464 	 * The 82599, X540 and X550 require that the interrupt throttling
3465 	 * rate be a multiple of 8. This is enforced by the register definition.
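	 *
	 * The "& 0xFF8" below simply rounds the configured value down to
	 * the nearest multiple of 8; e.g. a value of 0x95 (149) is
	 * programmed as 0x90 (144).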
3466 */ 3467 if (hw->mac.type == ixgbe_mac_82599EB || 3468 hw->mac.type == ixgbe_mac_X540 || 3469 hw->mac.type == ixgbe_mac_X550 || 3470 hw->mac.type == ixgbe_mac_X550EM_x) 3471 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8; 3472 3473 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe, 3474 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP); 3475 } 3476 3477 static void 3478 ixgbe_init_params(ixgbe_t *ixgbe) 3479 { 3480 struct ixgbe_hw *hw = &ixgbe->hw; 3481 ixgbe_link_speed speeds_supported = 0; 3482 boolean_t negotiate; 3483 3484 /* 3485 * Get a list of speeds the adapter supports. If the hw struct hasn't 3486 * been populated with this information yet, retrieve it from the 3487 * adapter and save it to our own variable. 3488 * 3489 * On certain adapters, such as ones which use SFPs, the contents of 3490 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not 3491 * updated, so we must rely on calling ixgbe_get_link_capabilities() 3492 * in order to ascertain the speeds which we are capable of supporting, 3493 * and in the case of SFP-equipped adapters, which speed we are 3494 * advertising. If ixgbe_get_link_capabilities() fails for some reason, 3495 * we'll go with a default list of speeds as a last resort. 3496 */ 3497 speeds_supported = hw->phy.speeds_supported; 3498 3499 if (speeds_supported == 0) { 3500 if (ixgbe_get_link_capabilities(hw, &speeds_supported, 3501 &negotiate) != IXGBE_SUCCESS) { 3502 if (hw->mac.type == ixgbe_mac_82598EB) { 3503 speeds_supported = 3504 IXGBE_LINK_SPEED_82598_AUTONEG; 3505 } else { 3506 speeds_supported = 3507 IXGBE_LINK_SPEED_82599_AUTONEG; 3508 } 3509 } 3510 } 3511 ixgbe->speeds_supported = speeds_supported; 3512 3513 /* 3514 * By default, all supported speeds are enabled and advertised. 
3515 */
3516 	if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) {
3517 		ixgbe->param_en_10000fdx_cap = 1;
3518 		ixgbe->param_adv_10000fdx_cap = 1;
3519 	} else {
3520 		ixgbe->param_en_10000fdx_cap = 0;
3521 		ixgbe->param_adv_10000fdx_cap = 0;
3522 	}
3523
3524 	if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) {
3525 		ixgbe->param_en_5000fdx_cap = 1;
3526 		ixgbe->param_adv_5000fdx_cap = 1;
3527 	} else {
3528 		ixgbe->param_en_5000fdx_cap = 0;
3529 		ixgbe->param_adv_5000fdx_cap = 0;
3530 	}
3531
3532 	if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) {
3533 		ixgbe->param_en_2500fdx_cap = 1;
3534 		ixgbe->param_adv_2500fdx_cap = 1;
3535 	} else {
3536 		ixgbe->param_en_2500fdx_cap = 0;
3537 		ixgbe->param_adv_2500fdx_cap = 0;
3538 	}
3539
3540 	if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) {
3541 		ixgbe->param_en_1000fdx_cap = 1;
3542 		ixgbe->param_adv_1000fdx_cap = 1;
3543 	} else {
3544 		ixgbe->param_en_1000fdx_cap = 0;
3545 		ixgbe->param_adv_1000fdx_cap = 0;
3546 	}
3547
3548 	if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) {
3549 		ixgbe->param_en_100fdx_cap = 1;
3550 		ixgbe->param_adv_100fdx_cap = 1;
3551 	} else {
3552 		ixgbe->param_en_100fdx_cap = 0;
3553 		ixgbe->param_adv_100fdx_cap = 0;
3554 	}
3555
3556 	ixgbe->param_pause_cap = 1;
3557 	ixgbe->param_asym_pause_cap = 1;
3558 	ixgbe->param_rem_fault = 0;
3559
3560 	ixgbe->param_adv_autoneg_cap = 1;
3561 	ixgbe->param_adv_pause_cap = 1;
3562 	ixgbe->param_adv_asym_pause_cap = 1;
3563 	ixgbe->param_adv_rem_fault = 0;
3564
3565 	ixgbe->param_lp_10000fdx_cap = 0;
3566 	ixgbe->param_lp_5000fdx_cap = 0;
3567 	ixgbe->param_lp_2500fdx_cap = 0;
3568 	ixgbe->param_lp_1000fdx_cap = 0;
3569 	ixgbe->param_lp_100fdx_cap = 0;
3570 	ixgbe->param_lp_autoneg_cap = 0;
3571 	ixgbe->param_lp_pause_cap = 0;
3572 	ixgbe->param_lp_asym_pause_cap = 0;
3573 	ixgbe->param_lp_rem_fault = 0;
3574 }
3575
3576 /*
3577  * ixgbe_get_prop - Get a property value out of the configuration file
3578  * ixgbe.conf.
3579  *
3580  * Caller provides the name of the property, a default value, a minimum
3581  * value, and a maximum value.
3582  *
3583  * Return configured value of the property, with default, minimum and
3584  * maximum properly applied.
3585  */
3586 static int
3587 ixgbe_get_prop(ixgbe_t *ixgbe,
3588     char *propname,	/* name of the property */
3589     int minval,		/* minimum acceptable value */
3590     int maxval,		/* maximum acceptable value */
3591     int defval)		/* default value */
3592 {
3593 	int value;
3594
3595 	/*
3596 	 * Call ddi_prop_get_int() to read the conf settings
3597 	 */
3598 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3599 	    DDI_PROP_DONTPASS, propname, defval);
3600 	if (value > maxval)
3601 		value = maxval;
3602
3603 	if (value < minval)
3604 		value = minval;
3605
3606 	return (value);
3607 }
3608
3609 /*
3610  * ixgbe_driver_setup_link - Use the link properties to set up the link.
3611  */
3612 int
3613 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3614 {
3615 	struct ixgbe_hw *hw = &ixgbe->hw;
3616 	ixgbe_link_speed advertised = 0;
3617
3618 	/*
3619 	 * Assemble a list of enabled speeds to auto-negotiate with.
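	 *
	 * For example, if only param_en_10000fdx_cap and
	 * param_en_1000fdx_cap are set, the advertised mask becomes
	 * (IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL).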
3620 */ 3621 if (ixgbe->param_en_10000fdx_cap == 1) 3622 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 3623 3624 if (ixgbe->param_en_5000fdx_cap == 1) 3625 advertised |= IXGBE_LINK_SPEED_5GB_FULL; 3626 3627 if (ixgbe->param_en_2500fdx_cap == 1) 3628 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; 3629 3630 if (ixgbe->param_en_1000fdx_cap == 1) 3631 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 3632 3633 if (ixgbe->param_en_100fdx_cap == 1) 3634 advertised |= IXGBE_LINK_SPEED_100_FULL; 3635 3636 /* 3637 * As a last resort, autoneg with a default list of speeds. 3638 */ 3639 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) { 3640 ixgbe_notice(ixgbe, "Invalid link settings. Setting link " 3641 "to autonegotiate with full capabilities."); 3642 3643 if (hw->mac.type == ixgbe_mac_82598EB) 3644 advertised = IXGBE_LINK_SPEED_82598_AUTONEG; 3645 else 3646 advertised = IXGBE_LINK_SPEED_82599_AUTONEG; 3647 } 3648 3649 if (setup_hw) { 3650 if (ixgbe_setup_link(&ixgbe->hw, advertised, 3651 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) { 3652 ixgbe_notice(ixgbe, "Setup link failed on this " 3653 "device."); 3654 return (IXGBE_FAILURE); 3655 } 3656 } 3657 3658 return (IXGBE_SUCCESS); 3659 } 3660 3661 /* 3662 * ixgbe_driver_link_check - Link status processing. 3663 * 3664 * This function can be called in both kernel context and interrupt context 3665 */ 3666 static void 3667 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3668 { 3669 struct ixgbe_hw *hw = &ixgbe->hw; 3670 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3671 boolean_t link_up = B_FALSE; 3672 boolean_t link_changed = B_FALSE; 3673 3674 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3675 3676 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3677 if (link_up) { 3678 ixgbe->link_check_complete = B_TRUE; 3679 3680 /* Link is up, enable flow control settings */ 3681 (void) ixgbe_fc_enable(hw); 3682 3683 /* 3684 * The Link is up, check whether it was marked as down earlier 3685 */ 3686 if (ixgbe->link_state != LINK_STATE_UP) { 3687 switch (speed) { 3688 case IXGBE_LINK_SPEED_10GB_FULL: 3689 ixgbe->link_speed = SPEED_10GB; 3690 break; 3691 case IXGBE_LINK_SPEED_5GB_FULL: 3692 ixgbe->link_speed = SPEED_5GB; 3693 break; 3694 case IXGBE_LINK_SPEED_2_5GB_FULL: 3695 ixgbe->link_speed = SPEED_2_5GB; 3696 break; 3697 case IXGBE_LINK_SPEED_1GB_FULL: 3698 ixgbe->link_speed = SPEED_1GB; 3699 break; 3700 case IXGBE_LINK_SPEED_100_FULL: 3701 ixgbe->link_speed = SPEED_100; 3702 } 3703 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3704 ixgbe->link_state = LINK_STATE_UP; 3705 link_changed = B_TRUE; 3706 } 3707 } else { 3708 if (ixgbe->link_check_complete == B_TRUE || 3709 (ixgbe->link_check_complete == B_FALSE && 3710 gethrtime() >= ixgbe->link_check_hrtime)) { 3711 /* 3712 * The link is really down 3713 */ 3714 ixgbe->link_check_complete = B_TRUE; 3715 3716 if (ixgbe->link_state != LINK_STATE_DOWN) { 3717 ixgbe->link_speed = 0; 3718 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3719 ixgbe->link_state = LINK_STATE_DOWN; 3720 link_changed = B_TRUE; 3721 } 3722 } 3723 } 3724 3725 /* 3726 * If we are in an interrupt context, need to re-enable the 3727 * interrupt, which was automasked 3728 */ 3729 if (servicing_interrupt() != 0) { 3730 ixgbe->eims |= IXGBE_EICR_LSC; 3731 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3732 } 3733 3734 if (link_changed) { 3735 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3736 } 3737 } 3738 3739 /* 3740 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599. 
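 *
 * This work is done in a taskq rather than directly in interrupt
 * context because SFP identification and link setup involve slow
 * module accesses and delays that are unsuitable at interrupt level.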
3741 */ 3742 static void 3743 ixgbe_sfp_check(void *arg) 3744 { 3745 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3746 uint32_t eicr = ixgbe->eicr; 3747 struct ixgbe_hw *hw = &ixgbe->hw; 3748 3749 mutex_enter(&ixgbe->gen_lock); 3750 (void) hw->phy.ops.identify_sfp(hw); 3751 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 3752 /* clear the interrupt */ 3753 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3754 3755 /* if link up, do multispeed fiber setup */ 3756 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3757 B_TRUE); 3758 ixgbe_driver_link_check(ixgbe); 3759 ixgbe_get_hw_state(ixgbe); 3760 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) { 3761 /* clear the interrupt */ 3762 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw)); 3763 3764 /* if link up, do sfp module setup */ 3765 (void) hw->mac.ops.setup_sfp(hw); 3766 3767 /* do multispeed fiber setup */ 3768 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3769 B_TRUE); 3770 ixgbe_driver_link_check(ixgbe); 3771 ixgbe_get_hw_state(ixgbe); 3772 } 3773 mutex_exit(&ixgbe->gen_lock); 3774 3775 /* 3776 * We need to fully re-check the link later. 3777 */ 3778 ixgbe->link_check_complete = B_FALSE; 3779 ixgbe->link_check_hrtime = gethrtime() + 3780 (IXGBE_LINK_UP_TIME * 100000000ULL); 3781 } 3782 3783 /* 3784 * ixgbe_overtemp_check - overtemp module processing done in taskq 3785 * 3786 * This routine will only be called on adapters with temperature sensor. 3787 * The indication of over-temperature can be either SDP0 interrupt or the link 3788 * status change interrupt. 3789 */ 3790 static void 3791 ixgbe_overtemp_check(void *arg) 3792 { 3793 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3794 struct ixgbe_hw *hw = &ixgbe->hw; 3795 uint32_t eicr = ixgbe->eicr; 3796 ixgbe_link_speed speed; 3797 boolean_t link_up; 3798 3799 mutex_enter(&ixgbe->gen_lock); 3800 3801 /* make sure we know current state of link */ 3802 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3803 3804 /* check over-temp condition */ 3805 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) || 3806 (eicr & IXGBE_EICR_LSC)) { 3807 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) { 3808 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3809 3810 /* 3811 * Disable the adapter interrupts 3812 */ 3813 ixgbe_disable_adapter_interrupts(ixgbe); 3814 3815 /* 3816 * Disable Rx/Tx units 3817 */ 3818 (void) ixgbe_stop_adapter(hw); 3819 3820 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3821 ixgbe_error(ixgbe, 3822 "Problem: Network adapter has been stopped " 3823 "because it has overheated"); 3824 ixgbe_error(ixgbe, 3825 "Action: Restart the computer. " 3826 "If the problem persists, power off the system " 3827 "and replace the adapter"); 3828 } 3829 } 3830 3831 /* write to clear the interrupt */ 3832 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 3833 3834 mutex_exit(&ixgbe->gen_lock); 3835 } 3836 3837 /* 3838 * ixgbe_phy_check - taskq to process interrupts from an external PHY 3839 * 3840 * This routine will only be called on adapters with external PHYs 3841 * (such as X550) that may be trying to raise our attention to some event. 3842 * Currently, this is limited to claiming PHY overtemperature and link status 3843 * change (LSC) events, however this may expand to include other things in 3844 * future adapters. 
3845 */
3846 static void
3847 ixgbe_phy_check(void *arg)
3848 {
3849 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3850 	struct ixgbe_hw *hw = &ixgbe->hw;
3851 	int rv;
3852
3853 	mutex_enter(&ixgbe->gen_lock);
3854
3855 	/*
3856 	 * X550 baseT PHY overtemp and LSC events are handled here.
3857 	 *
3858 	 * If an overtemp event occurs, it will be reflected in the
3859 	 * return value of phy.ops.handle_lasi() and the common code will
3860 	 * automatically power off the baseT PHY. This is our cue to trigger
3861 	 * an FMA event.
3862 	 *
3863 	 * If a link status change event occurs, phy.ops.handle_lasi() will
3864 	 * automatically initiate a link setup between the integrated KR PHY
3865 	 * and the external X557 PHY to ensure that the link speed between
3866 	 * them matches the link speed of the baseT link.
3867 	 */
3868 	rv = ixgbe_handle_lasi(hw);
3869
3870 	if (rv == IXGBE_ERR_OVERTEMP) {
3871 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3872
3873 		/*
3874 		 * Disable the adapter interrupts
3875 		 */
3876 		ixgbe_disable_adapter_interrupts(ixgbe);
3877
3878 		/*
3879 		 * Disable Rx/Tx units
3880 		 */
3881 		(void) ixgbe_stop_adapter(hw);
3882
3883 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3884 		ixgbe_error(ixgbe,
3885 		    "Problem: Network adapter has been stopped due to an "
3886 		    "overtemperature event being detected.");
3887 		ixgbe_error(ixgbe,
3888 		    "Action: Shut down or restart the computer. If the issue "
3889 		    "persists, please take action in accordance with the "
3890 		    "recommendations from your system vendor.");
3891 	}
3892
3893 	mutex_exit(&ixgbe->gen_lock);
3894 }
3895
3896 /*
3897  * ixgbe_link_timer - timer for link status detection
3898  */
3899 static void
3900 ixgbe_link_timer(void *arg)
3901 {
3902 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3903
3904 	mutex_enter(&ixgbe->gen_lock);
3905 	ixgbe_driver_link_check(ixgbe);
3906 	mutex_exit(&ixgbe->gen_lock);
3907 }
3908
3909 /*
3910  * ixgbe_local_timer - Driver watchdog function.
3911  *
3912  * This function will handle the transmit stall check and other routines.
3913  */
3914 static void
3915 ixgbe_local_timer(void *arg)
3916 {
3917 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3918
3919 	if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3920 		goto out;
3921
3922 	if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3923 		ixgbe->reset_count++;
3924 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3925 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3926 		goto out;
3927 	}
3928
3929 	if (ixgbe_stall_check(ixgbe)) {
3930 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3931 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3932
3933 		ixgbe->reset_count++;
3934 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3935 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3936 	}
3937
3938 out:
3939 	ixgbe_restart_watchdog_timer(ixgbe);
3940 }
3941
3942 /*
3943  * ixgbe_stall_check - Check for transmit stall.
3944  *
3945  * This function checks if the adapter is stalled (in transmit).
3946  *
3947  * It is called each time the watchdog timeout is invoked.
3948  * If the transmit descriptor reclaim continuously fails,
3949  * the watchdog value will increment by 1. If the watchdog
3950  * value exceeds the threshold, the ixgbe is assumed to
3951  * have stalled and needs to be reset.
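 *
 * For example, with the watchdog firing roughly once per second and a
 * stall threshold (STALL_WATCHDOG_TIMEOUT) of, say, 8 ticks, a ring
 * whose descriptor reclaim has failed continuously for about 8 seconds
 * triggers a chip reset.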
3952 */ 3953 static boolean_t 3954 ixgbe_stall_check(ixgbe_t *ixgbe) 3955 { 3956 ixgbe_tx_ring_t *tx_ring; 3957 boolean_t result; 3958 int i; 3959 3960 if (ixgbe->link_state != LINK_STATE_UP) 3961 return (B_FALSE); 3962 3963 /* 3964 * If any tx ring is stalled, we'll reset the chipset 3965 */ 3966 result = B_FALSE; 3967 for (i = 0; i < ixgbe->num_tx_rings; i++) { 3968 tx_ring = &ixgbe->tx_rings[i]; 3969 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) { 3970 tx_ring->tx_recycle(tx_ring); 3971 } 3972 3973 if (tx_ring->recycle_fail > 0) 3974 tx_ring->stall_watchdog++; 3975 else 3976 tx_ring->stall_watchdog = 0; 3977 3978 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 3979 result = B_TRUE; 3980 break; 3981 } 3982 } 3983 3984 if (result) { 3985 tx_ring->stall_watchdog = 0; 3986 tx_ring->recycle_fail = 0; 3987 } 3988 3989 return (result); 3990 } 3991 3992 3993 /* 3994 * is_valid_mac_addr - Check if the mac address is valid. 3995 */ 3996 static boolean_t 3997 is_valid_mac_addr(uint8_t *mac_addr) 3998 { 3999 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 4000 const uint8_t addr_test2[6] = 4001 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4002 4003 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4004 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4005 return (B_FALSE); 4006 4007 return (B_TRUE); 4008 } 4009 4010 static boolean_t 4011 ixgbe_find_mac_address(ixgbe_t *ixgbe) 4012 { 4013 #ifdef __sparc 4014 struct ixgbe_hw *hw = &ixgbe->hw; 4015 uchar_t *bytes; 4016 struct ether_addr sysaddr; 4017 uint_t nelts; 4018 int err; 4019 boolean_t found = B_FALSE; 4020 4021 /* 4022 * The "vendor's factory-set address" may already have 4023 * been extracted from the chip, but if the property 4024 * "local-mac-address" is set we use that instead. 4025 * 4026 * We check whether it looks like an array of 6 4027 * bytes (which it should, if OBP set it). If we can't 4028 * make sense of it this way, we'll ignore it. 4029 */ 4030 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4031 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 4032 if (err == DDI_PROP_SUCCESS) { 4033 if (nelts == ETHERADDRL) { 4034 while (nelts--) 4035 hw->mac.addr[nelts] = bytes[nelts]; 4036 found = B_TRUE; 4037 } 4038 ddi_prop_free(bytes); 4039 } 4040 4041 /* 4042 * Look up the OBP property "local-mac-address?". If the user has set 4043 * 'local-mac-address? = false', use "the system address" instead. 4044 */ 4045 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 4046 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 4047 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 4048 if (localetheraddr(NULL, &sysaddr) != 0) { 4049 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 4050 found = B_TRUE; 4051 } 4052 } 4053 ddi_prop_free(bytes); 4054 } 4055 4056 /* 4057 * Finally(!), if there's a valid "mac-address" property (created 4058 * if we netbooted from this interface), we must use this instead 4059 * of any of the above to ensure that the NFS/install server doesn't 4060 * get confused by the address changing as illumos takes over! 
4061 */ 4062 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4063 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 4064 if (err == DDI_PROP_SUCCESS) { 4065 if (nelts == ETHERADDRL) { 4066 while (nelts--) 4067 hw->mac.addr[nelts] = bytes[nelts]; 4068 found = B_TRUE; 4069 } 4070 ddi_prop_free(bytes); 4071 } 4072 4073 if (found) { 4074 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 4075 return (B_TRUE); 4076 } 4077 #else 4078 _NOTE(ARGUNUSED(ixgbe)); 4079 #endif 4080 4081 return (B_TRUE); 4082 } 4083 4084 #pragma inline(ixgbe_arm_watchdog_timer) 4085 static void 4086 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 4087 { 4088 /* 4089 * Fire a watchdog timer 4090 */ 4091 ixgbe->watchdog_tid = 4092 timeout(ixgbe_local_timer, 4093 (void *)ixgbe, 1 * drv_usectohz(1000000)); 4094 4095 } 4096 4097 /* 4098 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 4099 */ 4100 void 4101 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 4102 { 4103 mutex_enter(&ixgbe->watchdog_lock); 4104 4105 if (!ixgbe->watchdog_enable) { 4106 ixgbe->watchdog_enable = B_TRUE; 4107 ixgbe->watchdog_start = B_TRUE; 4108 ixgbe_arm_watchdog_timer(ixgbe); 4109 } 4110 4111 mutex_exit(&ixgbe->watchdog_lock); 4112 } 4113 4114 /* 4115 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 4116 */ 4117 void 4118 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 4119 { 4120 timeout_id_t tid; 4121 4122 mutex_enter(&ixgbe->watchdog_lock); 4123 4124 ixgbe->watchdog_enable = B_FALSE; 4125 ixgbe->watchdog_start = B_FALSE; 4126 tid = ixgbe->watchdog_tid; 4127 ixgbe->watchdog_tid = 0; 4128 4129 mutex_exit(&ixgbe->watchdog_lock); 4130 4131 if (tid != 0) 4132 (void) untimeout(tid); 4133 } 4134 4135 /* 4136 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 4137 */ 4138 void 4139 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 4140 { 4141 mutex_enter(&ixgbe->watchdog_lock); 4142 4143 if (ixgbe->watchdog_enable) { 4144 if (!ixgbe->watchdog_start) { 4145 ixgbe->watchdog_start = B_TRUE; 4146 ixgbe_arm_watchdog_timer(ixgbe); 4147 } 4148 } 4149 4150 mutex_exit(&ixgbe->watchdog_lock); 4151 } 4152 4153 /* 4154 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 4155 */ 4156 static void 4157 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 4158 { 4159 mutex_enter(&ixgbe->watchdog_lock); 4160 4161 if (ixgbe->watchdog_start) 4162 ixgbe_arm_watchdog_timer(ixgbe); 4163 4164 mutex_exit(&ixgbe->watchdog_lock); 4165 } 4166 4167 /* 4168 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 4169 */ 4170 void 4171 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 4172 { 4173 timeout_id_t tid; 4174 4175 mutex_enter(&ixgbe->watchdog_lock); 4176 4177 ixgbe->watchdog_start = B_FALSE; 4178 tid = ixgbe->watchdog_tid; 4179 ixgbe->watchdog_tid = 0; 4180 4181 mutex_exit(&ixgbe->watchdog_lock); 4182 4183 if (tid != 0) 4184 (void) untimeout(tid); 4185 } 4186 4187 /* 4188 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 4189 */ 4190 static void 4191 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 4192 { 4193 struct ixgbe_hw *hw = &ixgbe->hw; 4194 4195 /* 4196 * mask all interrupts off 4197 */ 4198 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 4199 4200 /* 4201 * for MSI-X, also disable autoclear 4202 */ 4203 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4204 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 4205 } 4206 4207 IXGBE_WRITE_FLUSH(hw); 4208 } 4209 4210 /* 4211 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 
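 *
 * A note on the registers programmed below: EIMS unmasks the selected
 * interrupt causes, EIAC chooses which causes auto-clear on an EICR
 * read (MSI-X only), EIAM controls auto-masking, and GPIE carries
 * mode bits such as IXGBE_GPIE_MSIX_MODE and the RSC delay used with
 * LRO.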
4212 */ 4213 static void 4214 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 4215 { 4216 struct ixgbe_hw *hw = &ixgbe->hw; 4217 uint32_t eiac, eiam; 4218 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4219 4220 /* interrupt types to enable */ 4221 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 4222 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 4223 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */ 4224 4225 /* enable automask on "other" causes that this adapter can generate */ 4226 eiam = ixgbe->capab->other_intr; 4227 4228 /* 4229 * msi-x mode 4230 */ 4231 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4232 /* enable autoclear but not on bits 29:20 */ 4233 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR); 4234 4235 /* general purpose interrupt enable */ 4236 gpie |= (IXGBE_GPIE_MSIX_MODE 4237 | IXGBE_GPIE_PBA_SUPPORT 4238 | IXGBE_GPIE_OCD 4239 | IXGBE_GPIE_EIAME); 4240 /* 4241 * non-msi-x mode 4242 */ 4243 } else { 4244 4245 /* disable autoclear, leave gpie at default */ 4246 eiac = 0; 4247 4248 /* 4249 * General purpose interrupt enable. 4250 * For 82599, X540 and X550, extended interrupt 4251 * automask enable only in MSI or MSI-X mode 4252 */ 4253 if ((hw->mac.type == ixgbe_mac_82598EB) || 4254 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) { 4255 gpie |= IXGBE_GPIE_EIAME; 4256 } 4257 } 4258 4259 /* Enable specific "other" interrupt types */ 4260 switch (hw->mac.type) { 4261 case ixgbe_mac_82598EB: 4262 gpie |= ixgbe->capab->other_gpie; 4263 break; 4264 4265 case ixgbe_mac_82599EB: 4266 case ixgbe_mac_X540: 4267 case ixgbe_mac_X550: 4268 case ixgbe_mac_X550EM_x: 4269 gpie |= ixgbe->capab->other_gpie; 4270 4271 /* Enable RSC Delay 8us when LRO enabled */ 4272 if (ixgbe->lro_enable) { 4273 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT); 4274 } 4275 break; 4276 4277 default: 4278 break; 4279 } 4280 4281 /* write to interrupt control registers */ 4282 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4283 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 4284 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam); 4285 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 4286 IXGBE_WRITE_FLUSH(hw); 4287 } 4288 4289 /* 4290 * ixgbe_loopback_ioctl - Loopback support. 
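 *
 * The expected caller sequence, sketched from the cases handled
 * below: LB_GET_INFO_SIZE to learn the size of the property array,
 * LB_GET_INFO to fetch the supported modes (normal, internal MAC and
 * external), then LB_SET_MODE to select one. LB_GET_MODE simply
 * reports the current ixgbe->loopback_mode.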
4291 */ 4292 enum ioc_reply 4293 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 4294 { 4295 lb_info_sz_t *lbsp; 4296 lb_property_t *lbpp; 4297 uint32_t *lbmp; 4298 uint32_t size; 4299 uint32_t value; 4300 4301 if (mp->b_cont == NULL) 4302 return (IOC_INVAL); 4303 4304 switch (iocp->ioc_cmd) { 4305 default: 4306 return (IOC_INVAL); 4307 4308 case LB_GET_INFO_SIZE: 4309 size = sizeof (lb_info_sz_t); 4310 if (iocp->ioc_count != size) 4311 return (IOC_INVAL); 4312 4313 value = sizeof (lb_normal); 4314 value += sizeof (lb_mac); 4315 value += sizeof (lb_external); 4316 4317 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 4318 *lbsp = value; 4319 break; 4320 4321 case LB_GET_INFO: 4322 value = sizeof (lb_normal); 4323 value += sizeof (lb_mac); 4324 value += sizeof (lb_external); 4325 4326 size = value; 4327 if (iocp->ioc_count != size) 4328 return (IOC_INVAL); 4329 4330 value = 0; 4331 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 4332 4333 lbpp[value++] = lb_normal; 4334 lbpp[value++] = lb_mac; 4335 lbpp[value++] = lb_external; 4336 break; 4337 4338 case LB_GET_MODE: 4339 size = sizeof (uint32_t); 4340 if (iocp->ioc_count != size) 4341 return (IOC_INVAL); 4342 4343 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4344 *lbmp = ixgbe->loopback_mode; 4345 break; 4346 4347 case LB_SET_MODE: 4348 size = 0; 4349 if (iocp->ioc_count != sizeof (uint32_t)) 4350 return (IOC_INVAL); 4351 4352 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4353 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 4354 return (IOC_INVAL); 4355 break; 4356 } 4357 4358 iocp->ioc_count = size; 4359 iocp->ioc_error = 0; 4360 4361 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4362 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4363 return (IOC_INVAL); 4364 } 4365 4366 return (IOC_REPLY); 4367 } 4368 4369 /* 4370 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 4371 */ 4372 static boolean_t 4373 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 4374 { 4375 if (mode == ixgbe->loopback_mode) 4376 return (B_TRUE); 4377 4378 ixgbe->loopback_mode = mode; 4379 4380 if (mode == IXGBE_LB_NONE) { 4381 /* 4382 * Reset the chip 4383 */ 4384 (void) ixgbe_reset(ixgbe); 4385 return (B_TRUE); 4386 } 4387 4388 mutex_enter(&ixgbe->gen_lock); 4389 4390 switch (mode) { 4391 default: 4392 mutex_exit(&ixgbe->gen_lock); 4393 return (B_FALSE); 4394 4395 case IXGBE_LB_EXTERNAL: 4396 break; 4397 4398 case IXGBE_LB_INTERNAL_MAC: 4399 ixgbe_set_internal_mac_loopback(ixgbe); 4400 break; 4401 } 4402 4403 mutex_exit(&ixgbe->gen_lock); 4404 4405 return (B_TRUE); 4406 } 4407 4408 /* 4409 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 
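 *
 * In outline: the MAC is looped back by setting IXGBE_HLREG0_LPBK;
 * on 82599-class MACs the link is then pinned up via IXGBE_AUTOC_FLU,
 * while on 82598 the Atlas analog tx lanes are powered down so that
 * looped packets never reach the wire.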
4410 */ 4411 static void 4412 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 4413 { 4414 struct ixgbe_hw *hw; 4415 uint32_t reg; 4416 uint8_t atlas; 4417 4418 hw = &ixgbe->hw; 4419 4420 /* 4421 * Setup MAC loopback 4422 */ 4423 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 4424 reg |= IXGBE_HLREG0_LPBK; 4425 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 4426 4427 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4428 reg &= ~IXGBE_AUTOC_LMS_MASK; 4429 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4430 4431 /* 4432 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 4433 */ 4434 switch (hw->mac.type) { 4435 case ixgbe_mac_82598EB: 4436 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4437 &atlas); 4438 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 4439 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4440 atlas); 4441 4442 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4443 &atlas); 4444 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 4445 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4446 atlas); 4447 4448 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4449 &atlas); 4450 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 4451 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4452 atlas); 4453 4454 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4455 &atlas); 4456 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 4457 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4458 atlas); 4459 break; 4460 4461 case ixgbe_mac_82599EB: 4462 case ixgbe_mac_X540: 4463 case ixgbe_mac_X550: 4464 case ixgbe_mac_X550EM_x: 4465 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4466 reg |= (IXGBE_AUTOC_FLU | 4467 IXGBE_AUTOC_10G_KX4); 4468 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4469 4470 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL, 4471 B_FALSE); 4472 break; 4473 4474 default: 4475 break; 4476 } 4477 } 4478 4479 #pragma inline(ixgbe_intr_rx_work) 4480 /* 4481 * ixgbe_intr_rx_work - RX processing of ISR. 4482 */ 4483 static void 4484 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 4485 { 4486 mblk_t *mp; 4487 4488 mutex_enter(&rx_ring->rx_lock); 4489 4490 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4491 mutex_exit(&rx_ring->rx_lock); 4492 4493 if (mp != NULL) 4494 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4495 rx_ring->ring_gen_num); 4496 } 4497 4498 #pragma inline(ixgbe_intr_tx_work) 4499 /* 4500 * ixgbe_intr_tx_work - TX processing of ISR. 
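 *
 * This recycles completed tx descriptors and, once at least
 * tx_resched_thresh descriptors are free again, calls
 * mac_tx_ring_update() so that a previously blocked tx ring resumes
 * transmission.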
4501 */ 4502 static void 4503 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 4504 { 4505 ixgbe_t *ixgbe = tx_ring->ixgbe; 4506 4507 /* 4508 * Recycle the tx descriptors 4509 */ 4510 tx_ring->tx_recycle(tx_ring); 4511 4512 /* 4513 * Schedule the re-transmit 4514 */ 4515 if (tx_ring->reschedule && 4516 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) { 4517 tx_ring->reschedule = B_FALSE; 4518 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl, 4519 tx_ring->ring_handle); 4520 tx_ring->stat_reschedule++; 4521 } 4522 } 4523 4524 #pragma inline(ixgbe_intr_other_work) 4525 /* 4526 * ixgbe_intr_other_work - Process interrupt types other than tx/rx 4527 */ 4528 static void 4529 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr) 4530 { 4531 struct ixgbe_hw *hw = &ixgbe->hw; 4532 4533 ASSERT(mutex_owned(&ixgbe->gen_lock)); 4534 4535 /* 4536 * handle link status change 4537 */ 4538 if (eicr & IXGBE_EICR_LSC) { 4539 ixgbe_driver_link_check(ixgbe); 4540 ixgbe_get_hw_state(ixgbe); 4541 } 4542 4543 /* 4544 * check for fan failure on adapters with fans 4545 */ 4546 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 4547 (eicr & IXGBE_EICR_GPI_SDP1)) { 4548 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 4549 4550 /* 4551 * Disable the adapter interrupts 4552 */ 4553 ixgbe_disable_adapter_interrupts(ixgbe); 4554 4555 /* 4556 * Disable Rx/Tx units 4557 */ 4558 (void) ixgbe_stop_adapter(&ixgbe->hw); 4559 4560 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 4561 ixgbe_error(ixgbe, 4562 "Problem: Network adapter has been stopped " 4563 "because the fan has stopped.\n"); 4564 ixgbe_error(ixgbe, 4565 "Action: Replace the adapter.\n"); 4566 4567 /* re-enable the interrupt, which was automasked */ 4568 ixgbe->eims |= IXGBE_EICR_GPI_SDP1; 4569 } 4570 4571 /* 4572 * Do SFP check for adapters with hot-plug capability 4573 */ 4574 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) && 4575 ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) || 4576 (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) { 4577 ixgbe->eicr = eicr; 4578 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq, 4579 ixgbe_sfp_check, (void *)ixgbe, 4580 DDI_NOSLEEP)) != DDI_SUCCESS) { 4581 ixgbe_log(ixgbe, "No memory available to dispatch " 4582 "taskq for SFP check"); 4583 } 4584 } 4585 4586 /* 4587 * Do over-temperature check for adapters with temp sensor 4588 */ 4589 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) && 4590 ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) || 4591 (eicr & IXGBE_EICR_LSC))) { 4592 ixgbe->eicr = eicr; 4593 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq, 4594 ixgbe_overtemp_check, (void *)ixgbe, 4595 DDI_NOSLEEP)) != DDI_SUCCESS) { 4596 ixgbe_log(ixgbe, "No memory available to dispatch " 4597 "taskq for overtemp check"); 4598 } 4599 } 4600 4601 /* 4602 * Process an external PHY interrupt 4603 */ 4604 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 4605 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 4606 ixgbe->eicr = eicr; 4607 if ((ddi_taskq_dispatch(ixgbe->phy_taskq, 4608 ixgbe_phy_check, (void *)ixgbe, 4609 DDI_NOSLEEP)) != DDI_SUCCESS) { 4610 ixgbe_log(ixgbe, "No memory available to dispatch " 4611 "taskq for PHY check"); 4612 } 4613 } 4614 } 4615 4616 /* 4617 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 
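 *
 * Unlike the MSI/MSI-X handlers, this one must first decide whether
 * the interrupt is ours at all: a zero EICR read means the (possibly
 * shared) line was raised by another device, and the handler returns
 * DDI_INTR_UNCLAIMED.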
4618 */ 4619 static uint_t 4620 ixgbe_intr_legacy(void *arg1, void *arg2) 4621 { 4622 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4623 struct ixgbe_hw *hw = &ixgbe->hw; 4624 ixgbe_tx_ring_t *tx_ring; 4625 ixgbe_rx_ring_t *rx_ring; 4626 uint32_t eicr; 4627 mblk_t *mp; 4628 boolean_t tx_reschedule; 4629 uint_t result; 4630 4631 _NOTE(ARGUNUSED(arg2)); 4632 4633 mutex_enter(&ixgbe->gen_lock); 4634 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 4635 mutex_exit(&ixgbe->gen_lock); 4636 return (DDI_INTR_UNCLAIMED); 4637 } 4638 4639 mp = NULL; 4640 tx_reschedule = B_FALSE; 4641 4642 /* 4643 * Any bit set in eicr: claim this interrupt 4644 */ 4645 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4646 4647 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4648 mutex_exit(&ixgbe->gen_lock); 4649 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4650 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4651 return (DDI_INTR_CLAIMED); 4652 } 4653 4654 if (eicr) { 4655 /* 4656 * For legacy interrupt, we have only one interrupt, 4657 * so we have only one rx ring and one tx ring enabled. 4658 */ 4659 ASSERT(ixgbe->num_rx_rings == 1); 4660 ASSERT(ixgbe->num_tx_rings == 1); 4661 4662 /* 4663 * For legacy interrupt, rx rings[0] will use RTxQ[0]. 4664 */ 4665 if (eicr & 0x1) { 4666 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE; 4667 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4668 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4669 /* 4670 * Clean the rx descriptors 4671 */ 4672 rx_ring = &ixgbe->rx_rings[0]; 4673 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4674 } 4675 4676 /* 4677 * For legacy interrupt, tx rings[0] will use RTxQ[1]. 4678 */ 4679 if (eicr & 0x2) { 4680 /* 4681 * Recycle the tx descriptors 4682 */ 4683 tx_ring = &ixgbe->tx_rings[0]; 4684 tx_ring->tx_recycle(tx_ring); 4685 4686 /* 4687 * Schedule the re-transmit 4688 */ 4689 tx_reschedule = (tx_ring->reschedule && 4690 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)); 4691 } 4692 4693 /* any interrupt type other than tx/rx */ 4694 if (eicr & ixgbe->capab->other_intr) { 4695 switch (hw->mac.type) { 4696 case ixgbe_mac_82598EB: 4697 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4698 break; 4699 4700 case ixgbe_mac_82599EB: 4701 case ixgbe_mac_X540: 4702 case ixgbe_mac_X550: 4703 case ixgbe_mac_X550EM_x: 4704 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4705 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4706 break; 4707 4708 default: 4709 break; 4710 } 4711 ixgbe_intr_other_work(ixgbe, eicr); 4712 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4713 } 4714 4715 mutex_exit(&ixgbe->gen_lock); 4716 4717 result = DDI_INTR_CLAIMED; 4718 } else { 4719 mutex_exit(&ixgbe->gen_lock); 4720 4721 /* 4722 * No interrupt cause bits set: don't claim this interrupt. 4723 */ 4724 result = DDI_INTR_UNCLAIMED; 4725 } 4726 4727 /* re-enable the interrupts which were automasked */ 4728 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4729 4730 /* 4731 * Do the following work outside of the gen_lock 4732 */ 4733 if (mp != NULL) { 4734 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4735 rx_ring->ring_gen_num); 4736 } 4737 4738 if (tx_reschedule) { 4739 tx_ring->reschedule = B_FALSE; 4740 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle); 4741 tx_ring->stat_reschedule++; 4742 } 4743 4744 return (result); 4745 } 4746 4747 /* 4748 * ixgbe_intr_msi - Interrupt handler for MSI. 
4749 */ 4750 static uint_t 4751 ixgbe_intr_msi(void *arg1, void *arg2) 4752 { 4753 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4754 struct ixgbe_hw *hw = &ixgbe->hw; 4755 uint32_t eicr; 4756 4757 _NOTE(ARGUNUSED(arg2)); 4758 4759 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4760 4761 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4762 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4763 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4764 return (DDI_INTR_CLAIMED); 4765 } 4766 4767 /* 4768 * For MSI interrupt, we have only one vector, 4769 * so we have only one rx ring and one tx ring enabled. 4770 */ 4771 ASSERT(ixgbe->num_rx_rings == 1); 4772 ASSERT(ixgbe->num_tx_rings == 1); 4773 4774 /* 4775 * For MSI interrupt, rx rings[0] will use RTxQ[0]. 4776 */ 4777 if (eicr & 0x1) { 4778 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 4779 } 4780 4781 /* 4782 * For MSI interrupt, tx rings[0] will use RTxQ[1]. 4783 */ 4784 if (eicr & 0x2) { 4785 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 4786 } 4787 4788 /* any interrupt type other than tx/rx */ 4789 if (eicr & ixgbe->capab->other_intr) { 4790 mutex_enter(&ixgbe->gen_lock); 4791 switch (hw->mac.type) { 4792 case ixgbe_mac_82598EB: 4793 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4794 break; 4795 4796 case ixgbe_mac_82599EB: 4797 case ixgbe_mac_X540: 4798 case ixgbe_mac_X550: 4799 case ixgbe_mac_X550EM_x: 4800 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4801 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4802 break; 4803 4804 default: 4805 break; 4806 } 4807 ixgbe_intr_other_work(ixgbe, eicr); 4808 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4809 mutex_exit(&ixgbe->gen_lock); 4810 } 4811 4812 /* re-enable the interrupts which were automasked */ 4813 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4814 4815 return (DDI_INTR_CLAIMED); 4816 } 4817 4818 /* 4819 * ixgbe_intr_msix - Interrupt handler for MSI-X. 
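 *
 * Each MSI-X vector carries its own rx_map/tx_map/other_map bitmaps,
 * populated by ixgbe_map_intrs_to_vectors(), so this handler only
 * services the rings whose bits are set for the vector that fired.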
4820 */
4821 static uint_t
4822 ixgbe_intr_msix(void *arg1, void *arg2)
4823 {
4824 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4825 ixgbe_t *ixgbe = vect->ixgbe;
4826 struct ixgbe_hw *hw = &ixgbe->hw;
4827 uint32_t eicr;
4828 int r_idx = 0;
4829
4830 _NOTE(ARGUNUSED(arg2));
4831
4832 /*
4833 * Clean each rx ring that has its bit set in the map
4834 */
4835 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4836 while (r_idx >= 0) {
4837 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4838 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4839 (ixgbe->num_rx_rings - 1));
4840 }
4841
4842 /*
4843 * Clean each tx ring that has its bit set in the map
4844 */
4845 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4846 while (r_idx >= 0) {
4847 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4848 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4849 (ixgbe->num_tx_rings - 1));
4850 }
4851
4852
4853 /*
4854 * Clean other interrupt (link change) that has its bit set in the map
4855 */
4856 if (BT_TEST(vect->other_map, 0) == 1) {
4857 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4858
4859 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4860 DDI_FM_OK) {
4861 ddi_fm_service_impact(ixgbe->dip,
4862 DDI_SERVICE_DEGRADED);
4863 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4864 return (DDI_INTR_CLAIMED);
4865 }
4866
4867 /*
4868 * Check "other" cause bits: any interrupt type other than tx/rx
4869 */
4870 if (eicr & ixgbe->capab->other_intr) {
4871 mutex_enter(&ixgbe->gen_lock);
4872 switch (hw->mac.type) {
4873 case ixgbe_mac_82598EB:
4874 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4875 ixgbe_intr_other_work(ixgbe, eicr);
4876 break;
4877
4878 case ixgbe_mac_82599EB:
4879 case ixgbe_mac_X540:
4880 case ixgbe_mac_X550:
4881 case ixgbe_mac_X550EM_x:
4882 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4883 ixgbe_intr_other_work(ixgbe, eicr);
4884 break;
4885
4886 default:
4887 break;
4888 }
4889 mutex_exit(&ixgbe->gen_lock);
4890 }
4891
4892 /* re-enable the interrupts which were automasked */
4893 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4894 }
4895
4896 return (DDI_INTR_CLAIMED);
4897 }
4898
4899 /*
4900 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4901 *
4902 * Normal sequence is to try MSI-X; if not successful, try MSI;
4903 * if not successful, try Legacy.
4904 * ixgbe->intr_force can be used to force the sequence to start with
4905 * any of the 3 types.
4906 * If MSI-X is not used, the number of tx/rx rings is forced to 1.
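 *
 * For example (per the comparisons below, and assuming the usual
 * ordering of the IXGBE_INTR_* values), setting ixgbe->intr_force
 * above IXGBE_INTR_MSIX skips the MSI-X attempt entirely and the
 * allocation starts with MSI.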
4907 */
4908 static int
4909 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4910 {
4911 dev_info_t *devinfo;
4912 int intr_types;
4913 int rc;
4914
4915 devinfo = ixgbe->dip;
4916
4917 /*
4918 * Get supported interrupt types
4919 */
4920 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4921
4922 if (rc != DDI_SUCCESS) {
4923 ixgbe_log(ixgbe,
4924 "Get supported interrupt types failed: %d", rc);
4925 return (IXGBE_FAILURE);
4926 }
4927 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4928
4929 ixgbe->intr_type = 0;
4930
4931 /*
4932 * Install MSI-X interrupts
4933 */
4934 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4935 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4936 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4937 if (rc == IXGBE_SUCCESS)
4938 return (IXGBE_SUCCESS);
4939
4940 ixgbe_log(ixgbe,
4941 "Allocate MSI-X failed, trying MSI interrupts...");
4942 }
4943
4944 /*
4945 * MSI-X not used, force rings and groups to 1
4946 */
4947 ixgbe->num_rx_rings = 1;
4948 ixgbe->num_rx_groups = 1;
4949 ixgbe->num_tx_rings = 1;
4950 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4951 ixgbe_log(ixgbe,
4952 "MSI-X not used, force rings and groups number to 1");
4953
4954 /*
4955 * Install MSI interrupts
4956 */
4957 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4958 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4959 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4960 if (rc == IXGBE_SUCCESS)
4961 return (IXGBE_SUCCESS);
4962
4963 ixgbe_log(ixgbe,
4964 "Allocate MSI failed, trying Legacy interrupts...");
4965 }
4966
4967 /*
4968 * Install legacy interrupts
4969 */
4970 if (intr_types & DDI_INTR_TYPE_FIXED) {
4971 /*
4972 * Disallow legacy interrupts for X550. X550 has a silicon
4973 * bug which prevents Shared Legacy interrupts from working.
4974 * For details, please reference:
4975 *
4976 * Intel Ethernet Controller X550 Specification Update rev. 2.1
4977 * May 2016, erratum 22: PCIe Interrupt Status Bit
4978 */
4979 if (ixgbe->hw.mac.type == ixgbe_mac_X550 ||
4980 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x ||
4981 ixgbe->hw.mac.type == ixgbe_mac_X550_vf ||
4982 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) {
4983 ixgbe_log(ixgbe,
4984 "Legacy interrupts are not supported on this "
4985 "adapter. Please use MSI or MSI-X instead.");
4986 return (IXGBE_FAILURE);
4987 }
4988 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4989 if (rc == IXGBE_SUCCESS)
4990 return (IXGBE_SUCCESS);
4991
4992 ixgbe_log(ixgbe,
4993 "Allocate Legacy interrupts failed");
4994 }
4995
4996 /*
4997 * If none of the 3 types succeeded, return failure
4998 */
4999 return (IXGBE_FAILURE);
5000 }
5001
5002 /*
5003 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
5004 *
5005 * For legacy and MSI, only 1 handle is needed. For MSI-X, the
5006 * request is sized from the ring counts; if fewer handles than the
5007 * required minimum are available, the allocation fails. On success,
5008 * the rx ring count may be trimmed to match the vectors granted.
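 *
 * A worked example of the MSI-X sizing below: with 8 rx and 8 tx
 * rings the driver requests min(16, 8 + 8) = 16 handles, further
 * capped by capab->max_ring_vect, and then records whatever number
 * was actually granted in ixgbe->intr_cnt.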
5009 */
5010 static int
5011 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
5012 {
5013 dev_info_t *devinfo;
5014 int request, count, actual;
5015 int minimum;
5016 int rc;
5017 uint32_t ring_per_group;
5018
5019 devinfo = ixgbe->dip;
5020
5021 switch (intr_type) {
5022 case DDI_INTR_TYPE_FIXED:
5023 request = 1; /* Request 1 legacy interrupt handle */
5024 minimum = 1;
5025 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
5026 break;
5027
5028 case DDI_INTR_TYPE_MSI:
5029 request = 1; /* Request 1 MSI interrupt handle */
5030 minimum = 1;
5031 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
5032 break;
5033
5034 case DDI_INTR_TYPE_MSIX:
5035 /*
5036 * The best number of vectors for the adapter is
5037 * (# rx rings + # tx rings); however, we limit
5038 * the number we request.
5039 */
5040 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
5041 if (request > ixgbe->capab->max_ring_vect)
5042 request = ixgbe->capab->max_ring_vect;
5043 minimum = 1;
5044 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
5045 break;
5046
5047 default:
5048 ixgbe_log(ixgbe,
5049 "invalid call to ixgbe_alloc_intr_handles(): %d\n",
5050 intr_type);
5051 return (IXGBE_FAILURE);
5052 }
5053 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
5054 request, minimum);
5055
5056 /*
5057 * Get number of supported interrupts
5058 */
5059 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5060 if ((rc != DDI_SUCCESS) || (count < minimum)) {
5061 ixgbe_log(ixgbe,
5062 "Get interrupt number failed. Return: %d, count: %d",
5063 rc, count);
5064 return (IXGBE_FAILURE);
5065 }
5066 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
5067
5068 actual = 0;
5069 ixgbe->intr_cnt = 0;
5070 ixgbe->intr_cnt_max = 0;
5071 ixgbe->intr_cnt_min = 0;
5072
5073 /*
5074 * Allocate an array of interrupt handles
5075 */
5076 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
5077 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
5078
5079 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
5080 request, &actual, DDI_INTR_ALLOC_NORMAL);
5081 if (rc != DDI_SUCCESS) {
5082 ixgbe_log(ixgbe, "Allocate interrupts failed. "
5083 "return: %d, request: %d, actual: %d",
5084 rc, request, actual);
5085 goto alloc_handle_fail;
5086 }
5087 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
5088
5089 /*
5090 * upper/lower limit of interrupts
5091 */
5092 ixgbe->intr_cnt = actual;
5093 ixgbe->intr_cnt_max = request;
5094 ixgbe->intr_cnt_min = minimum;
5095
5096 /*
5097 * The number of RSS rings per group should not exceed the number of
5098 * rx interrupt vectors; otherwise the rx ring count must be adjusted.
5099 */
5100 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5101 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
5102 if (actual < ring_per_group) {
5103 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
5104 ixgbe_setup_vmdq_rss_conf(ixgbe);
5105 }
5106
5107 /*
5108 * Now we know the actual number of vectors: these are mapped to the
5109 * 'other' cause and to the rx and tx rings.
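 *
 * (Illustratively: with a single rx group of 4 rings but only 2
 * vectors actually allocated, the adjustment above shrinks
 * num_rx_rings to num_rx_groups * actual = 2 and re-derives the
 * VMDq/RSS layout via ixgbe_setup_vmdq_rss_conf().)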
5110 */ 5111 if (actual < minimum) { 5112 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 5113 actual); 5114 goto alloc_handle_fail; 5115 } 5116 5117 /* 5118 * Get priority for first vector, assume remaining are all the same 5119 */ 5120 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 5121 if (rc != DDI_SUCCESS) { 5122 ixgbe_log(ixgbe, 5123 "Get interrupt priority failed: %d", rc); 5124 goto alloc_handle_fail; 5125 } 5126 5127 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 5128 if (rc != DDI_SUCCESS) { 5129 ixgbe_log(ixgbe, 5130 "Get interrupt cap failed: %d", rc); 5131 goto alloc_handle_fail; 5132 } 5133 5134 ixgbe->intr_type = intr_type; 5135 5136 return (IXGBE_SUCCESS); 5137 5138 alloc_handle_fail: 5139 ixgbe_rem_intrs(ixgbe); 5140 5141 return (IXGBE_FAILURE); 5142 } 5143 5144 /* 5145 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 5146 * 5147 * Before adding the interrupt handlers, the interrupt vectors have 5148 * been allocated, and the rx/tx rings have also been allocated. 5149 */ 5150 static int 5151 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 5152 { 5153 int vector = 0; 5154 int rc; 5155 5156 switch (ixgbe->intr_type) { 5157 case DDI_INTR_TYPE_MSIX: 5158 /* 5159 * Add interrupt handler for all vectors 5160 */ 5161 for (vector = 0; vector < ixgbe->intr_cnt; vector++) { 5162 /* 5163 * install pointer to vect_map[vector] 5164 */ 5165 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5166 (ddi_intr_handler_t *)ixgbe_intr_msix, 5167 (void *)&ixgbe->vect_map[vector], NULL); 5168 5169 if (rc != DDI_SUCCESS) { 5170 ixgbe_log(ixgbe, 5171 "Add interrupt handler failed. " 5172 "return: %d, vector: %d", rc, vector); 5173 for (vector--; vector >= 0; vector--) { 5174 (void) ddi_intr_remove_handler( 5175 ixgbe->htable[vector]); 5176 } 5177 return (IXGBE_FAILURE); 5178 } 5179 } 5180 5181 break; 5182 5183 case DDI_INTR_TYPE_MSI: 5184 /* 5185 * Add interrupt handlers for the only vector 5186 */ 5187 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5188 (ddi_intr_handler_t *)ixgbe_intr_msi, 5189 (void *)ixgbe, NULL); 5190 5191 if (rc != DDI_SUCCESS) { 5192 ixgbe_log(ixgbe, 5193 "Add MSI interrupt handler failed: %d", rc); 5194 return (IXGBE_FAILURE); 5195 } 5196 5197 break; 5198 5199 case DDI_INTR_TYPE_FIXED: 5200 /* 5201 * Add interrupt handlers for the only vector 5202 */ 5203 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5204 (ddi_intr_handler_t *)ixgbe_intr_legacy, 5205 (void *)ixgbe, NULL); 5206 5207 if (rc != DDI_SUCCESS) { 5208 ixgbe_log(ixgbe, 5209 "Add legacy interrupt handler failed: %d", rc); 5210 return (IXGBE_FAILURE); 5211 } 5212 5213 break; 5214 5215 default: 5216 return (IXGBE_FAILURE); 5217 } 5218 5219 return (IXGBE_SUCCESS); 5220 } 5221 5222 #pragma inline(ixgbe_map_rxring_to_vector) 5223 /* 5224 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 5225 */ 5226 static void 5227 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 5228 { 5229 /* 5230 * Set bit in map 5231 */ 5232 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 5233 5234 /* 5235 * Count bits set 5236 */ 5237 ixgbe->vect_map[v_idx].rxr_cnt++; 5238 5239 /* 5240 * Remember bit position 5241 */ 5242 ixgbe->rx_rings[r_idx].intr_vector = v_idx; 5243 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 5244 } 5245 5246 #pragma inline(ixgbe_map_txring_to_vector) 5247 /* 5248 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 
5249 */ 5250 static void 5251 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 5252 { 5253 /* 5254 * Set bit in map 5255 */ 5256 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 5257 5258 /* 5259 * Count bits set 5260 */ 5261 ixgbe->vect_map[v_idx].txr_cnt++; 5262 5263 /* 5264 * Remember bit position 5265 */ 5266 ixgbe->tx_rings[t_idx].intr_vector = v_idx; 5267 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 5268 } 5269 5270 /* 5271 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector 5272 * allocation register (IVAR). 5273 * cause: 5274 * -1 : other cause 5275 * 0 : rx 5276 * 1 : tx 5277 */ 5278 static void 5279 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector, 5280 int8_t cause) 5281 { 5282 struct ixgbe_hw *hw = &ixgbe->hw; 5283 u32 ivar, index; 5284 5285 switch (hw->mac.type) { 5286 case ixgbe_mac_82598EB: 5287 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5288 if (cause == -1) { 5289 cause = 0; 5290 } 5291 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5292 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5293 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3))); 5294 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3))); 5295 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5296 break; 5297 5298 case ixgbe_mac_82599EB: 5299 case ixgbe_mac_X540: 5300 case ixgbe_mac_X550: 5301 case ixgbe_mac_X550EM_x: 5302 if (cause == -1) { 5303 /* other causes */ 5304 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5305 index = (intr_alloc_entry & 1) * 8; 5306 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5307 ivar &= ~(0xFF << index); 5308 ivar |= (msix_vector << index); 5309 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5310 } else { 5311 /* tx or rx causes */ 5312 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5313 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5314 ivar = IXGBE_READ_REG(hw, 5315 IXGBE_IVAR(intr_alloc_entry >> 1)); 5316 ivar &= ~(0xFF << index); 5317 ivar |= (msix_vector << index); 5318 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5319 ivar); 5320 } 5321 break; 5322 5323 default: 5324 break; 5325 } 5326 } 5327 5328 /* 5329 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of 5330 * given interrupt vector allocation register (IVAR). 
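 *
 * Layout note, as encoded in ixgbe_setup_ivar() above: on 82599-class
 * MACs each 32-bit IVAR register holds four 8-bit entries, and queue
 * entry n sits in register IVAR(n >> 1) at bit offset
 * 16 * (n & 1) + 8 * cause. For example, rx queue 5 (cause 0)
 * occupies bits 23:16 of IVAR(2).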
5331 * cause:
5332 * -1 : other cause
5333 * 0 : rx
5334 * 1 : tx
5335 */
5336 static void
5337 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5338 {
5339 struct ixgbe_hw *hw = &ixgbe->hw;
5340 u32 ivar, index;
5341
5342 switch (hw->mac.type) {
5343 case ixgbe_mac_82598EB:
5344 if (cause == -1) {
5345 cause = 0;
5346 }
5347 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5348 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5349 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
5350 (intr_alloc_entry & 0x3)));
5351 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5352 break;
5353
5354 case ixgbe_mac_82599EB:
5355 case ixgbe_mac_X540:
5356 case ixgbe_mac_X550:
5357 case ixgbe_mac_X550EM_x:
5358 if (cause == -1) {
5359 /* other causes */
5360 index = (intr_alloc_entry & 1) * 8;
5361 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5362 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5363 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5364 } else {
5365 /* tx or rx causes */
5366 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5367 ivar = IXGBE_READ_REG(hw,
5368 IXGBE_IVAR(intr_alloc_entry >> 1));
5369 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5370 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5371 ivar);
5372 }
5373 break;
5374
5375 default:
5376 break;
5377 }
5378 }
5379
5380 /*
5381 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
5382 * given interrupt vector allocation register (IVAR).
5383 * cause:
5384 * -1 : other cause
5385 * 0 : rx
5386 * 1 : tx
5387 */
5388 static void
5389 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5390 {
5391 struct ixgbe_hw *hw = &ixgbe->hw;
5392 u32 ivar, index;
5393
5394 switch (hw->mac.type) {
5395 case ixgbe_mac_82598EB:
5396 if (cause == -1) {
5397 cause = 0;
5398 }
5399 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5400 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5401 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
5402 (intr_alloc_entry & 0x3)));
5403 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5404 break;
5405
5406 case ixgbe_mac_82599EB:
5407 case ixgbe_mac_X540:
5408 case ixgbe_mac_X550:
5409 case ixgbe_mac_X550EM_x:
5410 if (cause == -1) {
5411 /* other causes */
5412 index = (intr_alloc_entry & 1) * 8;
5413 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5414 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5415 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5416 } else {
5417 /* tx or rx causes */
5418 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5419 ivar = IXGBE_READ_REG(hw,
5420 IXGBE_IVAR(intr_alloc_entry >> 1));
5421 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5422 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5423 ivar);
5424 }
5425 break;
5426
5427 default:
5428 break;
5429 }
5430 }
5431
5432 /*
5433 * Convert the driver-maintained rx ring index to the rx ring index
5434 * used by the h/w.
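 *
 * Two worked examples from the cases below: in plain VMDq mode on
 * 82599-class MACs, sw ring 3 maps to hw ring 6 (sw_rx_index * 2);
 * in VMDq+RSS mode with 2 rings per group and at most 32 groups,
 * sw ring 3 (group 1, offset 1) maps to hw ring 1 * 4 + 1 = 5.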
5435 */
5436 static uint32_t
5437 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
5438 {
5439
5440 struct ixgbe_hw *hw = &ixgbe->hw;
5441 uint32_t rx_ring_per_group, hw_rx_index;
5442
5443 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
5444 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
5445 return (sw_rx_index);
5446 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
5447 switch (hw->mac.type) {
5448 case ixgbe_mac_82598EB:
5449 return (sw_rx_index);
5450
5451 case ixgbe_mac_82599EB:
5452 case ixgbe_mac_X540:
5453 case ixgbe_mac_X550:
5454 case ixgbe_mac_X550EM_x:
5455 return (sw_rx_index * 2);
5456
5457 default:
5458 break;
5459 }
5460 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
5461 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5462
5463 switch (hw->mac.type) {
5464 case ixgbe_mac_82598EB:
5465 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
5466 16 + (sw_rx_index % rx_ring_per_group);
5467 return (hw_rx_index);
5468
5469 case ixgbe_mac_82599EB:
5470 case ixgbe_mac_X540:
5471 case ixgbe_mac_X550:
5472 case ixgbe_mac_X550EM_x:
5473 if (ixgbe->num_rx_groups > 32) {
5474 hw_rx_index = (sw_rx_index /
5475 rx_ring_per_group) * 2 +
5476 (sw_rx_index % rx_ring_per_group);
5477 } else {
5478 hw_rx_index = (sw_rx_index /
5479 rx_ring_per_group) * 4 +
5480 (sw_rx_index % rx_ring_per_group);
5481 }
5482 return (hw_rx_index);
5483
5484 default:
5485 break;
5486 }
5487 }
5488
5489 /*
5490 * Should never be reached; present only to keep the compiler happy.
5491 */
5492 return (sw_rx_index);
5493 }
5494
5495 /*
5496 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
5497 *
5498 * For MSI-X, this maps the rx, tx and other interrupts to
5499 * vectors [0 .. (intr_cnt - 1)].
5500 */
5501 static int
5502 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
5503 {
5504 int i, vector = 0;
5505
5506 /* initialize vector map */
5507 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
5508 for (i = 0; i < ixgbe->intr_cnt; i++) {
5509 ixgbe->vect_map[i].ixgbe = ixgbe;
5510 }
5511
5512 /*
5513 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
5514 * tx rings[0] on RTxQ[1].
5515 */
5516 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5517 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
5518 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
5519 return (IXGBE_SUCCESS);
5520 }
5521
5522 /*
5523 * Interrupts/vectors mapping for MSI-X
5524 */
5525
5526 /*
5527 * Map the other interrupt to vector 0;
5528 * set the bit in the map and count the bits set.
5529 */
5530 BT_SET(ixgbe->vect_map[vector].other_map, 0);
5531 ixgbe->vect_map[vector].other_cnt++;
5532
5533 /*
5534 * Map rx ring interrupts to vectors
5535 */
5536 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5537 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
5538 vector = (vector + 1) % ixgbe->intr_cnt;
5539 }
5540
5541 /*
5542 * Map tx ring interrupts to vectors
5543 */
5544 for (i = 0; i < ixgbe->num_tx_rings; i++) {
5545 ixgbe_map_txring_to_vector(ixgbe, i, vector);
5546 vector = (vector + 1) % ixgbe->intr_cnt;
5547 }
5548
5549 return (IXGBE_SUCCESS);
5550 }
5551
5552 /*
5553 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5554 * 5555 * This relies on ring/vector mapping already set up in the 5556 * vect_map[] structures 5557 */ 5558 static void 5559 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 5560 { 5561 struct ixgbe_hw *hw = &ixgbe->hw; 5562 ixgbe_intr_vector_t *vect; /* vector bitmap */ 5563 int r_idx; /* ring index */ 5564 int v_idx; /* vector index */ 5565 uint32_t hw_index; 5566 5567 /* 5568 * Clear any previous entries 5569 */ 5570 switch (hw->mac.type) { 5571 case ixgbe_mac_82598EB: 5572 for (v_idx = 0; v_idx < 25; v_idx++) 5573 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5574 break; 5575 5576 case ixgbe_mac_82599EB: 5577 case ixgbe_mac_X540: 5578 case ixgbe_mac_X550: 5579 case ixgbe_mac_X550EM_x: 5580 for (v_idx = 0; v_idx < 64; v_idx++) 5581 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5582 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0); 5583 break; 5584 5585 default: 5586 break; 5587 } 5588 5589 /* 5590 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and 5591 * tx rings[0] will use RTxQ[1]. 5592 */ 5593 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5594 ixgbe_setup_ivar(ixgbe, 0, 0, 0); 5595 ixgbe_setup_ivar(ixgbe, 0, 1, 1); 5596 return; 5597 } 5598 5599 /* 5600 * For MSI-X interrupt, "Other" is always on vector[0]. 5601 */ 5602 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1); 5603 5604 /* 5605 * For each interrupt vector, populate the IVAR table 5606 */ 5607 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 5608 vect = &ixgbe->vect_map[v_idx]; 5609 5610 /* 5611 * For each rx ring bit set 5612 */ 5613 r_idx = bt_getlowbit(vect->rx_map, 0, 5614 (ixgbe->num_rx_rings - 1)); 5615 5616 while (r_idx >= 0) { 5617 hw_index = ixgbe->rx_rings[r_idx].hw_index; 5618 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0); 5619 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 5620 (ixgbe->num_rx_rings - 1)); 5621 } 5622 5623 /* 5624 * For each tx ring bit set 5625 */ 5626 r_idx = bt_getlowbit(vect->tx_map, 0, 5627 (ixgbe->num_tx_rings - 1)); 5628 5629 while (r_idx >= 0) { 5630 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1); 5631 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 5632 (ixgbe->num_tx_rings - 1)); 5633 } 5634 } 5635 } 5636 5637 /* 5638 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 5639 */ 5640 static void 5641 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 5642 { 5643 int i; 5644 int rc; 5645 5646 for (i = 0; i < ixgbe->intr_cnt; i++) { 5647 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 5648 if (rc != DDI_SUCCESS) { 5649 IXGBE_DEBUGLOG_1(ixgbe, 5650 "Remove intr handler failed: %d", rc); 5651 } 5652 } 5653 } 5654 5655 /* 5656 * ixgbe_rem_intrs - Remove the allocated interrupts. 5657 */ 5658 static void 5659 ixgbe_rem_intrs(ixgbe_t *ixgbe) 5660 { 5661 int i; 5662 int rc; 5663 5664 for (i = 0; i < ixgbe->intr_cnt; i++) { 5665 rc = ddi_intr_free(ixgbe->htable[i]); 5666 if (rc != DDI_SUCCESS) { 5667 IXGBE_DEBUGLOG_1(ixgbe, 5668 "Free intr failed: %d", rc); 5669 } 5670 } 5671 5672 kmem_free(ixgbe->htable, ixgbe->intr_size); 5673 ixgbe->htable = NULL; 5674 } 5675 5676 /* 5677 * ixgbe_enable_intrs - Enable all the ddi interrupts. 
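 *
 * If the handles report DDI_INTR_FLAG_BLOCK capability (queried in
 * ixgbe_alloc_intr_handles()), all of them are enabled in a single
 * ddi_intr_block_enable() call; otherwise each handle is enabled
 * individually with ddi_intr_enable().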
5678 */ 5679 static int 5680 ixgbe_enable_intrs(ixgbe_t *ixgbe) 5681 { 5682 int i; 5683 int rc; 5684 5685 /* 5686 * Enable interrupts 5687 */ 5688 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5689 /* 5690 * Call ddi_intr_block_enable() for MSI 5691 */ 5692 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 5693 if (rc != DDI_SUCCESS) { 5694 ixgbe_log(ixgbe, 5695 "Enable block intr failed: %d", rc); 5696 return (IXGBE_FAILURE); 5697 } 5698 } else { 5699 /* 5700 * Call ddi_intr_enable() for Legacy/MSI non block enable 5701 */ 5702 for (i = 0; i < ixgbe->intr_cnt; i++) { 5703 rc = ddi_intr_enable(ixgbe->htable[i]); 5704 if (rc != DDI_SUCCESS) { 5705 ixgbe_log(ixgbe, 5706 "Enable intr failed: %d", rc); 5707 return (IXGBE_FAILURE); 5708 } 5709 } 5710 } 5711 5712 return (IXGBE_SUCCESS); 5713 } 5714 5715 /* 5716 * ixgbe_disable_intrs - Disable all the interrupts. 5717 */ 5718 static int 5719 ixgbe_disable_intrs(ixgbe_t *ixgbe) 5720 { 5721 int i; 5722 int rc; 5723 5724 /* 5725 * Disable all interrupts 5726 */ 5727 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5728 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 5729 if (rc != DDI_SUCCESS) { 5730 ixgbe_log(ixgbe, 5731 "Disable block intr failed: %d", rc); 5732 return (IXGBE_FAILURE); 5733 } 5734 } else { 5735 for (i = 0; i < ixgbe->intr_cnt; i++) { 5736 rc = ddi_intr_disable(ixgbe->htable[i]); 5737 if (rc != DDI_SUCCESS) { 5738 ixgbe_log(ixgbe, 5739 "Disable intr failed: %d", rc); 5740 return (IXGBE_FAILURE); 5741 } 5742 } 5743 } 5744 5745 return (IXGBE_SUCCESS); 5746 } 5747 5748 /* 5749 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 5750 */ 5751 static void 5752 ixgbe_get_hw_state(ixgbe_t *ixgbe) 5753 { 5754 struct ixgbe_hw *hw = &ixgbe->hw; 5755 ixgbe_link_speed speed = 0; 5756 boolean_t link_up = B_FALSE; 5757 uint32_t pcs1g_anlp = 0; 5758 5759 ASSERT(mutex_owned(&ixgbe->gen_lock)); 5760 ixgbe->param_lp_1000fdx_cap = 0; 5761 ixgbe->param_lp_100fdx_cap = 0; 5762 5763 /* check for link, don't wait */ 5764 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 5765 5766 /* 5767 * Update the observed Link Partner's capabilities. Not all adapters 5768 * can provide full information on the LP's capable speeds, so we 5769 * provide what we can. 5770 */ 5771 if (link_up) { 5772 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 5773 5774 ixgbe->param_lp_1000fdx_cap = 5775 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5776 ixgbe->param_lp_100fdx_cap = 5777 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5778 } 5779 5780 /* 5781 * Update GLD's notion of the adapter's currently advertised speeds. 5782 * Since the common code doesn't always record the current autonegotiate 5783 * settings in the phy struct for all parts (specifically, adapters with 5784 * SFPs) we first test to see if it is 0, and if so, we fall back to 5785 * using the adapter's speed capabilities which we saved during instance 5786 * init in ixgbe_init_params(). 5787 * 5788 * Adapters with SFPs will always be shown as advertising all of their 5789 * supported speeds, and adapters with baseT PHYs (where the phy struct 5790 * is maintained by the common code) will always have a factual view of 5791 * their currently-advertised speeds. In the case of SFPs, this is 5792 * acceptable as we default to advertising all speeds that the adapter 5793 * claims to support, and those properties are immutable; unlike on 5794 * baseT (copper) PHYs, where speeds can be enabled or disabled at will. 
5795 */ 5796 speed = hw->phy.autoneg_advertised; 5797 if (speed == 0) 5798 speed = ixgbe->speeds_supported; 5799 5800 ixgbe->param_adv_10000fdx_cap = 5801 (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0; 5802 ixgbe->param_adv_5000fdx_cap = 5803 (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0; 5804 ixgbe->param_adv_2500fdx_cap = 5805 (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0; 5806 ixgbe->param_adv_1000fdx_cap = 5807 (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0; 5808 ixgbe->param_adv_100fdx_cap = 5809 (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0; 5810 } 5811 5812 /* 5813 * ixgbe_get_driver_control - Notify that driver is in control of device. 5814 */ 5815 static void 5816 ixgbe_get_driver_control(struct ixgbe_hw *hw) 5817 { 5818 uint32_t ctrl_ext; 5819 5820 /* 5821 * Notify firmware that driver is in control of device 5822 */ 5823 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5824 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 5825 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5826 } 5827 5828 /* 5829 * ixgbe_release_driver_control - Notify that driver is no longer in control 5830 * of device. 5831 */ 5832 static void 5833 ixgbe_release_driver_control(struct ixgbe_hw *hw) 5834 { 5835 uint32_t ctrl_ext; 5836 5837 /* 5838 * Notify firmware that driver is no longer in control of device 5839 */ 5840 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5841 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 5842 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5843 } 5844 5845 /* 5846 * ixgbe_atomic_reserve - Atomic decrease operation. 5847 */ 5848 int 5849 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 5850 { 5851 uint32_t oldval; 5852 uint32_t newval; 5853 5854 /* 5855 * ATOMICALLY 5856 */ 5857 do { 5858 oldval = *count_p; 5859 if (oldval < n) 5860 return (-1); 5861 newval = oldval - n; 5862 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 5863 5864 return (newval); 5865 } 5866 5867 /* 5868 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 5869 */ 5870 static uint8_t * 5871 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 5872 { 5873 uint8_t *addr = *upd_ptr; 5874 uint8_t *new_ptr; 5875 5876 _NOTE(ARGUNUSED(hw)); 5877 _NOTE(ARGUNUSED(vmdq)); 5878 5879 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 5880 *upd_ptr = new_ptr; 5881 return (addr); 5882 } 5883 5884 /* 5885 * FMA support 5886 */ 5887 int 5888 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 5889 { 5890 ddi_fm_error_t de; 5891 5892 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5893 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 5894 return (de.fme_status); 5895 } 5896 5897 int 5898 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 5899 { 5900 ddi_fm_error_t de; 5901 5902 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5903 return (de.fme_status); 5904 } 5905 5906 /* 5907 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 5908 */ 5909 static int 5910 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5911 { 5912 _NOTE(ARGUNUSED(impl_data)); 5913 /* 5914 * as the driver can always deal with an error in any dma or 5915 * access handle, we can just return the fme_status value. 
5916 */
5917 pci_ereport_post(dip, err, NULL);
5918 return (err->fme_status);
5919 }
5920
5921 static void
5922 ixgbe_fm_init(ixgbe_t *ixgbe)
5923 {
5924 ddi_iblock_cookie_t iblk;
5925 int fma_dma_flag;
5926
5927 /*
5928 * Only register with IO Fault Services if we have some capability
5929 */
5930 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5931 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5932 } else {
5933 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5934 }
5935
5936 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5937 fma_dma_flag = 1;
5938 } else {
5939 fma_dma_flag = 0;
5940 }
5941
5942 ixgbe_set_fma_flags(fma_dma_flag);
5943
5944 if (ixgbe->fm_capabilities) {
5945
5946 /*
5947 * Register capabilities with IO Fault Services
5948 */
5949 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
5950
5951 /*
5952 * Initialize pci ereport capabilities if ereport capable
5953 */
5954 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5955 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5956 pci_ereport_setup(ixgbe->dip);
5957
5958 /*
5959 * Register error callback if error callback capable
5960 */
5961 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5962 ddi_fm_handler_register(ixgbe->dip,
5963 ixgbe_fm_error_cb, (void *)ixgbe);
5964 }
5965 }
5966
5967 static void
5968 ixgbe_fm_fini(ixgbe_t *ixgbe)
5969 {
5970 /*
5971 * Only unregister FMA capabilities if they are registered
5972 */
5973 if (ixgbe->fm_capabilities) {
5974
5975 /*
5976 * Release any resources allocated by pci_ereport_setup()
5977 */
5978 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5979 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5980 pci_ereport_teardown(ixgbe->dip);
5981
5982 /*
5983 * Un-register error callback if error callback capable
5984 */
5985 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5986 ddi_fm_handler_unregister(ixgbe->dip);
5987
5988 /*
5989 * Unregister from IO Fault Service
5990 */
5991 ddi_fm_fini(ixgbe->dip);
5992 }
5993 }
5994
5995 void
5996 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5997 {
5998 uint64_t ena;
5999 char buf[FM_MAX_CLASS];
6000
6001 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6002 ena = fm_ena_generate(0, FM_ENA_FMT1);
6003 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
6004 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
6005 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6006 }
6007 }
6008
6009 static int
6010 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
6011 {
6012 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
6013
6014 mutex_enter(&rx_ring->rx_lock);
6015 rx_ring->ring_gen_num = mr_gen_num;
6016 mutex_exit(&rx_ring->rx_lock);
6017 return (0);
6018 }
6019
6020 /*
6021 * Get the global ring index given a group index and a ring index within the group.
6022 */
6023 static int
6024 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
6025 {
6026 ixgbe_rx_ring_t *rx_ring;
6027 int i;
6028
6029 for (i = 0; i < ixgbe->num_rx_rings; i++) {
6030 rx_ring = &ixgbe->rx_rings[i];
6031 if (rx_ring->group_index == gindex)
6032 rindex--;
6033 if (rindex < 0)
6034 return (i);
6035 }
6036
6037 return (-1);
6038 }
6039
6040 /*
6041 * Callback function for the MAC layer to register all rings.
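 *
 * For rx rings the group-relative index handed in by the MAC layer
 * is first translated to a global ring index via
 * ixgbe_get_rx_ring_index(); tx rings are registered flat, with
 * group_index always -1.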
6042 */
6043 /* ARGSUSED */
6044 void
6045 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
6046 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
6047 {
6048 ixgbe_t *ixgbe = (ixgbe_t *)arg;
6049 mac_intr_t *mintr = &infop->mri_intr;
6050
6051 switch (rtype) {
6052 case MAC_RING_TYPE_RX: {
6053 /*
6054 * 'ring_index' is the ring index within the group.
6055 * We need to get the global ring index by searching across groups.
6056 */
6057 int global_ring_index = ixgbe_get_rx_ring_index(
6058 ixgbe, group_index, ring_index);
6059
6060 ASSERT(global_ring_index >= 0);
6061
6062 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
6063 rx_ring->ring_handle = rh;
6064
6065 infop->mri_driver = (mac_ring_driver_t)rx_ring;
6066 infop->mri_start = ixgbe_ring_start;
6067 infop->mri_stop = NULL;
6068 infop->mri_poll = ixgbe_ring_rx_poll;
6069 infop->mri_stat = ixgbe_rx_ring_stat;
6070
6071 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
6072 mintr->mi_enable = ixgbe_rx_ring_intr_enable;
6073 mintr->mi_disable = ixgbe_rx_ring_intr_disable;
6074 if (ixgbe->intr_type &
6075 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6076 mintr->mi_ddi_handle =
6077 ixgbe->htable[rx_ring->intr_vector];
6078 }
6079
6080 break;
6081 }
6082 case MAC_RING_TYPE_TX: {
6083 ASSERT(group_index == -1);
6084 ASSERT(ring_index < ixgbe->num_tx_rings);
6085
6086 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
6087 tx_ring->ring_handle = rh;
6088
6089 infop->mri_driver = (mac_ring_driver_t)tx_ring;
6090 infop->mri_start = NULL;
6091 infop->mri_stop = NULL;
6092 infop->mri_tx = ixgbe_ring_tx;
6093 infop->mri_stat = ixgbe_tx_ring_stat;
6094 if (ixgbe->intr_type &
6095 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6096 mintr->mi_ddi_handle =
6097 ixgbe->htable[tx_ring->intr_vector];
6098 }
6099 break;
6100 }
6101 default:
6102 break;
6103 }
6104 }
6105
6106 /*
6107 * Callback function for the MAC layer to register all groups.
6108 */
6109 void
6110 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
6111 mac_group_info_t *infop, mac_group_handle_t gh)
6112 {
6113 ixgbe_t *ixgbe = (ixgbe_t *)arg;
6114
6115 switch (rtype) {
6116 case MAC_RING_TYPE_RX: {
6117 ixgbe_rx_group_t *rx_group;
6118
6119 rx_group = &ixgbe->rx_groups[index];
6120 rx_group->group_handle = gh;
6121
6122 infop->mgi_driver = (mac_group_driver_t)rx_group;
6123 infop->mgi_start = NULL;
6124 infop->mgi_stop = NULL;
6125 infop->mgi_addmac = ixgbe_addmac;
6126 infop->mgi_remmac = ixgbe_remmac;
6127 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
6128
6129 break;
6130 }
6131 case MAC_RING_TYPE_TX:
6132 break;
6133 default:
6134 break;
6135 }
6136 }
6137
6138 /*
6139 * Enable the interrupt on the specified rx ring.
6140 */
6141 int
6142 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
6143 {
6144 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6145 ixgbe_t *ixgbe = rx_ring->ixgbe;
6146 int r_idx = rx_ring->index;
6147 int hw_r_idx = rx_ring->hw_index;
6148 int v_idx = rx_ring->intr_vector;
6149
6150 mutex_enter(&ixgbe->gen_lock);
6151 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
6152 mutex_exit(&ixgbe->gen_lock);
6153 /*
6154 * Simply return 0.
6155 * Interrupts are being adjusted. ixgbe_intr_adjust()
6156 * will eventually re-enable the interrupt when it's
6157 * done with the adjustment.
6158 */
6159 return (0);
6160 }
6161
6162 /*
6163 * Enable the interrupt by setting the VAL bit of the ring's entry
6164 * in the interrupt vector allocation register (IVAR).
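 * After setting VAL, the ring's bit is restored in the vector's
 * rx_map and a software interrupt is triggered through EICS so that
 * any packets that arrived while the ring was being polled are
 * picked up promptly.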
6165 */
6166 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
6167
6168 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
6169
6170 /*
6171 * Trigger a Rx interrupt on this ring
6172 */
6173 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
6174 IXGBE_WRITE_FLUSH(&ixgbe->hw);
6175
6176 mutex_exit(&ixgbe->gen_lock);
6177
6178 return (0);
6179 }
6180
6181 /*
6182 * Disable the interrupt on the specified rx ring.
6183 */
6184 int
6185 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
6186 {
6187 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6188 ixgbe_t *ixgbe = rx_ring->ixgbe;
6189 int r_idx = rx_ring->index;
6190 int hw_r_idx = rx_ring->hw_index;
6191 int v_idx = rx_ring->intr_vector;
6192
6193 mutex_enter(&ixgbe->gen_lock);
6194 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
6195 mutex_exit(&ixgbe->gen_lock);
6196 /*
6197 * Simply return 0.
6198 * In the rare case where an interrupt is being
6199 * disabled while interrupts are being adjusted,
6200 * we don't fail the operation. No interrupts will
6201 * be generated while they are adjusted, and
6202 * ixgbe_intr_adjust() will cause the interrupts
6203 * to be re-enabled once it completes. Note that
6204 * in this case, packets may be delivered to the
6205 * stack via interrupts before ixgbe_rx_ring_intr_enable()
6206 * is called again. This is acceptable since interrupt
6207 * adjustment is infrequent, and the stack will be
6208 * able to handle these packets.
6209 */
6210 return (0);
6211 }
6212
6213 /*
6214 * Disable the interrupt by clearing the VAL bit of the ring's entry
6215 * in the interrupt vector allocation register (IVAR).
6216 */
6217 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
6218
6219 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
6220
6221 mutex_exit(&ixgbe->gen_lock);
6222
6223 return (0);
6224 }
6225
6226 /*
6227 * Add a mac address.
6228 */
6229 static int
6230 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
6231 {
6232 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6233 ixgbe_t *ixgbe = rx_group->ixgbe;
6234 struct ixgbe_hw *hw = &ixgbe->hw;
6235 int slot, i;
6236
6237 mutex_enter(&ixgbe->gen_lock);
6238
6239 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6240 mutex_exit(&ixgbe->gen_lock);
6241 return (ECANCELED);
6242 }
6243
6244 if (ixgbe->unicst_avail == 0) {
6245 /* no slots available */
6246 mutex_exit(&ixgbe->gen_lock);
6247 return (ENOSPC);
6248 }
6249
6250 /*
6251 * The first ixgbe->num_rx_groups slots are reserved, one for each
6252 * group. The remaining slots are shared by all groups. When adding
6253 * a MAC address, the group's reserved slot is checked first, then
6254 * the shared slots are searched.
6255 */
6256 slot = -1;
6257 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
6258 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
6259 if (ixgbe->unicst_addr[i].mac.set == 0) {
6260 slot = i;
6261 break;
6262 }
6263 }
6264 } else {
6265 slot = rx_group->index;
6266 }
6267
6268 if (slot == -1) {
6269 /* no slots available */
6270 mutex_exit(&ixgbe->gen_lock);
6271 return (ENOSPC);
6272 }
6273
6274 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
6275 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
6276 rx_group->index, IXGBE_RAH_AV);
6277 ixgbe->unicst_addr[slot].mac.set = 1;
6278 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
6279 ixgbe->unicst_avail--;
6280
6281 mutex_exit(&ixgbe->gen_lock);
6282
6283 return (0);
6284 }
6285
6286 /*
6287 * Remove a mac address.
6288 */ 6289 static int 6290 ixgbe_remmac(void *arg, const uint8_t *mac_addr) 6291 { 6292 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 6293 ixgbe_t *ixgbe = rx_group->ixgbe; 6294 struct ixgbe_hw *hw = &ixgbe->hw; 6295 int slot; 6296 6297 mutex_enter(&ixgbe->gen_lock); 6298 6299 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6300 mutex_exit(&ixgbe->gen_lock); 6301 return (ECANCELED); 6302 } 6303 6304 slot = ixgbe_unicst_find(ixgbe, mac_addr); 6305 if (slot == -1) { 6306 mutex_exit(&ixgbe->gen_lock); 6307 return (EINVAL); 6308 } 6309 6310 if (ixgbe->unicst_addr[slot].mac.set == 0) { 6311 mutex_exit(&ixgbe->gen_lock); 6312 return (EINVAL); 6313 } 6314 6315 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 6316 (void) ixgbe_clear_rar(hw, slot); 6317 ixgbe->unicst_addr[slot].mac.set = 0; 6318 ixgbe->unicst_avail++; 6319 6320 mutex_exit(&ixgbe->gen_lock); 6321 6322 return (0); 6323 } 6324
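
/*
 * Slot-layout sketch for the unicast address table managed by
 * ixgbe_addmac()/ixgbe_remmac() above (assuming, for illustration,
 * num_rx_groups = 4 and unicst_total = 8): slots 0-3 are reserved,
 * one per rx group, and slots 4-7 are shared. The first address added
 * to a group takes that group's reserved slot; a second address for
 * the same group lands in the first free shared slot (slot 4 here).
 */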