/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
 */

#include "ixgbe_sw.h"

static char ixgbe_ident[] = "Intel 10Gb Ethernet";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static int ixgbe_alloc_rx_data(ixgbe_t *);
static void ixgbe_free_rx_data(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_setup_vmdq(ixgbe_t *);
static void ixgbe_setup_vmdq_rss(ixgbe_t *);
static void ixgbe_setup_rss_table(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
static void ixgbe_get_conf(ixgbe_t *);
static void ixgbe_init_params(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static void ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_sfp_check(void *);
static void ixgbe_overtemp_check(void *);
static void ixgbe_phy_check(void *);
static void ixgbe_link_timer(void *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_msix(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static int ixgbe_addmac(void *, const uint8_t *);
static int ixgbe_remmac(void *, const uint8_t *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static int ixgbe_quiesce(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
static int ixgbe_intr_cb_register(ixgbe_t *);
static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

char *ixgbe_priv_props[] = {
	"_tx_copy_thresh",
	"_tx_recycle_thresh",
	"_tx_overload_thresh",
	"_tx_resched_thresh",
	"_rx_copy_thresh",
	"_rx_limit_per_intr",
	"_intr_throttling",
	"_adv_pause_cap",
	"_adv_asym_pause_cap",
	NULL
};

#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))

static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ixgbe_quiesce,		/* devo_quiesce */
};

static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ixgbe_ident,		/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
	normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};

#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop,
	ixgbe_m_propinfo
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	64,		/* default number of rx queues */
	16,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2),	/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE)	/* capability flags */
};

static adapter_info_t ixgbe_X540_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1_X540
	| IXGBE_EICR_GPI_SDP2_X540),	/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN_X540
	| IXGBE_SDP2_GPIEN_X540),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE)	/* capability flags */
};

static adapter_info_t ixgbe_X550_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	0x200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE)	/* capability flags */
};

/*
 * Module Initialization Functions.
 */

int
_init(void)
{
	int status;

	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

	status = mod_install(&ixgbe_modlinkage);

	if (status != DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&ixgbe_modlinkage);

	if (status == DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	status = mod_info(&ixgbe_modlinkage, modinfop);

	return (status);
}
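
/*
 * Note: mod_install(9F) and mod_remove(9F) return 0 on success and an
 * errno value otherwise; comparing their results against DDI_SUCCESS
 * works only because DDI_SUCCESS is defined as 0.  Also note that
 * mac_init_ops() must always be balanced by mac_fini_ops(), both on
 * the _init() failure path and in _fini(), or GLDv3 framework state
 * is leaked.
 */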

/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	ixgbe_t *ixgbe;
	struct ixgbe_osdep *osdep;
	struct ixgbe_hw *hw;
	int instance;
	char taskqname[32];

	/*
	 * Check the command and perform corresponding operations
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (ixgbe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/* Get the device instance */
	instance = ddi_get_instance(devinfo);

	/* Allocate memory for the instance data structure */
	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

	ixgbe->dip = devinfo;
	ixgbe->instance = instance;

	hw = &ixgbe->hw;
	osdep = &ixgbe->osdep;
	hw->back = osdep;
	osdep->ixgbe = ixgbe;

	/* Attach the instance pointer to the dev_info data structure */
	ddi_set_driver_private(devinfo, ixgbe);

	/*
	 * Initialize for FMA support
	 */
	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	ixgbe_fm_init(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

	/*
	 * Map PCI config space registers
	 */
	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map PCI configurations");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

	/*
	 * Identify the chipset family
	 */
	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to identify hardware");
		goto attach_fail;
	}

	/*
	 * Map device registers
	 */
	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map device registers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

	/*
	 * Initialize driver parameters
	 */
	ixgbe_init_properties(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

	/*
	 * Register interrupt callback
	 */
	if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register interrupt callback");
		goto attach_fail;
	}

	/*
	 * Allocate interrupts
	 */
	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
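
	/*
	 * Note: the DDI_CB_FLAG_INTR callback registered above has to be
	 * in place before ddi_intr_alloc() runs inside ixgbe_alloc_intrs();
	 * that registration is what lets the Interrupt Resource Management
	 * (IRM) framework later grow or shrink this instance's MSI-X
	 * allotment through ixgbe_cbfunc().
	 */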

	/*
	 * Allocate rx/tx rings based on the ring numbers.
	 * The actual numbers of rx/tx rings are decided by the number of
	 * allocated interrupt vectors, so we should allocate the rings after
	 * interrupts are allocated.
	 */
	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
		goto attach_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Create a taskq for sfp-change
	 */
	(void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
	if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "sfp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;

	/*
	 * Create a taskq for over-temp
	 */
	(void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
	if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "overtemp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;

	/*
	 * Create a taskq for processing external PHY interrupts
	 */
	(void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
	if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "phy_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;

	/*
	 * Initialize driver parameters
	 */
	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize driver settings");
		goto attach_fail;
	}

	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handler and
	 * registering the softint, to avoid a situation where
	 * the interrupt handler can try to use an uninitialized mutex.
	 */
	ixgbe_init_locks(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

	/*
	 * Initialize chipset hardware
	 */
	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize adapter");
		goto attach_fail;
	}
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	/*
	 * Initialize adapter capabilities
	 */
	ixgbe_init_params(ixgbe);

	/*
	 * Initialize statistics
	 */
	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize statistics");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

	/*
	 * Register the driver to the MAC
	 */
	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register MAC");
		goto attach_fail;
	}
	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

	ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
	    IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
	if (ixgbe->periodic_id == 0) {
		ixgbe_error(ixgbe, "Failed to add the link check timer");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

	ixgbe_log(ixgbe, "%s", ixgbe_ident);
	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);

	return (DDI_SUCCESS);

attach_fail:
	ixgbe_unconfigure(devinfo, ixgbe);
	return (DDI_FAILURE);
}

/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach(),
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_stop(ixgbe, B_TRUE);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	}

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
ixgbe_quiesce(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	struct ixgbe_hw *hw;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);

	if (ixgbe == NULL)
		return (DDI_FAILURE);

	hw = &ixgbe->hw;

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Tell firmware the driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Each teardown step below is guarded by the attach_progress bit
	 * set by the corresponding attach step, so a partially attached
	 * instance unwinds only what it actually completed.
	 */

	/*
	 * Disable interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * Remove the link check timer
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
		if (ixgbe->periodic_id != NULL) {
			ddi_periodic_delete(ixgbe->periodic_id);
			ixgbe->periodic_id = NULL;
		}
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for sfp-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
		ddi_taskq_destroy(ixgbe->sfp_taskq);
	}

	/*
	 * Remove taskq for over-temp
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
		ddi_taskq_destroy(ixgbe->overtemp_taskq);
	}

	/*
	 * Remove taskq for external PHYs
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
		ddi_taskq_destroy(ixgbe->phy_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Unregister the interrupt callback handler
	 */
	if (ixgbe->cb_hdl != NULL) {
		(void) ddi_cb_unregister(ixgbe->cb_hdl);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	mac_register_t *mac;
	int status;

	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
		return (IXGBE_FAILURE);

	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mac->m_driver = ixgbe;
	mac->m_dip = ixgbe->dip;
	mac->m_src_addr = hw->mac.addr;
	mac->m_callbacks = &ixgbe_m_callbacks;
	mac->m_min_sdu = 0;
	mac->m_max_sdu = ixgbe->default_mtu;
	mac->m_margin = VLAN_TAGSZ;
	mac->m_priv_props = ixgbe_priv_props;
	mac->m_v12n = MAC_VIRT_LEVEL1;

	status = mac_register(mac, &ixgbe->mac_hdl);

	mac_free(mac);

	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}
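
/*
 * A note on the registration above: m_max_sdu is derived from the
 * configured default MTU, and setting m_v12n to MAC_VIRT_LEVEL1 tells
 * mac(9E) that this driver exposes rx/tx rings and ring groups, which
 * the framework then discovers through the capability queries handled
 * in ixgbe_m_getcapab().
 */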

/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
			ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
		}
		break;

	case ixgbe_mac_82599EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
			ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
		}
		break;

	case ixgbe_mac_X540:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
		ixgbe->capab = &ixgbe_X540_cap;
		/*
		 * For now, X540 is all set in its capab structure.
		 * As other X540 variants show up, things can change here.
		 */
		break;

	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
		ixgbe->capab = &ixgbe_X550_cap;

		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
			ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;

		/*
		 * Link detection on X552 SFP+ and X552/X557-AT
		 */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
			ixgbe->capab->other_intr |=
			    IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
		}
		break;

	default:
		IXGBE_DEBUGLOG_1(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
	dev_info_t *devinfo = ixgbe->dip;
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;
	off_t mem_size;

	/*
	 * First get the size of device registers to be mapped.
	 */
	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
	    != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Call ddi_regs_map_setup() to map registers
	 */
	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0,
	    mem_size, &ixgbe_regs_acc_attr,
	    &osdep->reg_handle)) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings,
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	dev_info_t *devinfo = ixgbe->dip;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_group_t *rx_group;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t rx_size;
	uint32_t tx_size;
	uint32_t ring_per_group;
	int i;

	/*
	 * Initialize chipset specific hardware function pointers
	 */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Get the system page size
	 */
	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

	/*
	 * Set rx buffer size
	 *
	 * The IP header alignment room is counted in the calculation.
	 * The rx buffer size is in units of 1K, as required by the
	 * chipset hardware.  (A worked example follows this function.)
	 */
	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
	ixgbe->rx_buf_size = ((rx_size >> 10) +
	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Set tx buffer size
	 */
	tx_size = ixgbe->max_frame_size;
	ixgbe->tx_buf_size = ((tx_size >> 10) +
	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Initialize rx/tx rings/groups parameters
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_ring->index = i;
		rx_ring->ixgbe = ixgbe;
		rx_ring->group_index = i / ring_per_group;
		rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
	}

	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		rx_group = &ixgbe->rx_groups[i];
		rx_group->index = i;
		rx_group->ixgbe = ixgbe;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		tx_ring->index = i;
		tx_ring->ixgbe = ixgbe;
		if (ixgbe->tx_head_wb_enable)
			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
		else
			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

		tx_ring->ring_size = ixgbe->tx_ring_size;
		tx_ring->free_list_size = ixgbe->tx_ring_size +
		    (ixgbe->tx_ring_size >> 1);
	}

	/*
	 * Initialize values of interrupt throttling rate
	 */
	for (i = 1; i < MAX_INTR_VECTOR; i++)
		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

	/*
	 * The initial link state should be "unknown"
	 */
	ixgbe->link_state = LINK_STATE_UNKNOWN;

	return (IXGBE_SUCCESS);
}
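
/*
 * A worked example of the 1 KB round-up above (illustrative numbers,
 * assuming the small IPHDR_ALIGN_ROOM pad of 2 bytes from ixgbe_sw.h):
 * with a 1500-byte MTU the max frame size is 1518 bytes, so rx_size is
 * 1520.  (rx_size >> 10) yields 1 and the masked low 10 bits are
 * non-zero, so rx_buf_size becomes (1 + 1) << 10 = 2048 bytes.  The
 * expression is equivalent to P2ROUNDUP(rx_size, 1 << 10).
 */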

/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_init(&rx_ring->rx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_init(&tx_ring->tx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->recycle_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_head_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_tail_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	mutex_init(&ixgbe->gen_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

	mutex_init(&ixgbe->watchdog_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}

/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_destroy(&rx_ring->rx_lock);
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_destroy(&tx_ring->tx_lock);
		mutex_destroy(&tx_ring->recycle_lock);
		mutex_destroy(&tx_ring->tcb_head_lock);
		mutex_destroy(&tx_ring->tcb_tail_lock);
	}

	mutex_destroy(&ixgbe->gen_lock);
	mutex_destroy(&ixgbe->watchdog_lock);
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	int i;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_SUCCESS);
	}
	ixgbe_stop(ixgbe, B_FALSE);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	return (DDI_SUCCESS);
}
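
/*
 * Note the asymmetry above: ixgbe_suspend() sets IXGBE_SUSPENDED but
 * leaves IXGBE_STARTED alone, which is how ixgbe_resume() later knows
 * whether the interface was running at suspend time and must be
 * restarted (and its tx rings kicked via mac_tx_ring_update()).  Both
 * paths pass B_FALSE to ixgbe_stop()/ixgbe_start(), so the DMA buffers
 * survive the suspend/resume cycle.
 */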

/*
 * ixgbe_init - Initialize the device.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u8 pbanum[IXGBE_PBANUM_LENGTH];
	int rv;

	mutex_enter(&ixgbe->gen_lock);

	/*
	 * Configure/Initialize hardware
	 */
	rv = ixgbe_init_hw(hw);
	if (rv != IXGBE_SUCCESS) {
		switch (rv) {

		/*
		 * The first three errors do not prevent us from progressing
		 * further, and are mainly advisory in nature. In the case of
		 * an SFP module not being present or not deemed supported by
		 * the common code, we advise the operator of this fact but
		 * carry on instead of failing hard, as SFPs can be inserted
		 * or replaced while the driver is running. In the case of an
		 * unknown error, we fail hard, logging the reason and
		 * emitting a FMA event.
		 */
		case IXGBE_ERR_EEPROM_VERSION:
			ixgbe_error(ixgbe,
			    "This Intel 10Gb Ethernet device is pre-release and"
			    " contains outdated firmware. Please contact your"
			    " hardware vendor for a replacement.");
			break;
		case IXGBE_ERR_SFP_NOT_PRESENT:
			ixgbe_error(ixgbe,
			    "No SFP+ module detected on this interface. Please "
			    "install a supported SFP+ module for this "
			    "interface to become operational.");
			break;
		case IXGBE_ERR_SFP_NOT_SUPPORTED:
			ixgbe_error(ixgbe,
			    "Unsupported SFP+ module detected. Please replace "
			    "it with a supported SFP+ module per Intel "
			    "documentation, or bypass this check with "
			    "allow_unsupported_sfp=1 in ixgbe.conf.");
			break;
		default:
			ixgbe_error(ixgbe,
			    "Failed to initialize hardware. ixgbe_init_hw "
			    "returned %d", rv);
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Need to init eeprom before validating the checksum.
	 */
	if (ixgbe_init_eeprom_params(hw) < 0) {
		ixgbe_error(ixgbe,
		    "Unable to initialize the eeprom interface.");
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * NVM validation
	 */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
			ixgbe_error(ixgbe,
			    "Invalid NVM checksum. Please contact "
			    "the vendor to update the NVM.");
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Set up default flow control thresholds. Whether flow control is
	 * enabled, and its type, are controlled by ixgbe.conf.
	 */
	hw->fc.high_water[0] = DEFAULT_FCRTH;
	hw->fc.low_water[0] = DEFAULT_FCRTL;
	hw->fc.pause_time = DEFAULT_FCPAUSE;
	hw->fc.send_xon = B_TRUE;

	/*
	 * Initialize flow control
	 */
	(void) ixgbe_start_hw(hw);

	/*
	 * Initialize link settings
	 */
	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

	/*
	 * Initialize the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * Read identifying information and place in devinfo.
	 */
	pbanum[0] = '\0';
	(void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (*pbanum != '\0') {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
		    "printed-board-assembly", (char *)pbanum);
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto init_fail;
	}

	mutex_exit(&ixgbe->gen_lock);
	return (IXGBE_SUCCESS);

init_fail:
	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	mutex_exit(&ixgbe->gen_lock);
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	return (IXGBE_FAILURE);
}

/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Re-enable relaxed ordering for performance. It is disabled
	 * by default in the hardware init.
	 */
	if (ixgbe->relax_order_enable == B_TRUE)
		ixgbe_enable_relaxed_ordering(hw);

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
	}

	/*
	 * Disable Wake-on-LAN
	 */
	IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);

	/*
	 * Some adapters offer Energy Efficient Ethernet (EEE) support.
	 * Due to issues with EEE in e1000g/igb, we disable this by default
	 * as a precautionary measure.
1516 * 1517 * Currently, the only known adapter which supports EEE in the ixgbe 1518 * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the 1519 * first revision of it, as well as any X550 with MAC type 6 (non-EM) 1520 */ 1521 (void) ixgbe_setup_eee(hw, B_FALSE); 1522 1523 /* 1524 * Turn on any present SFP Tx laser 1525 */ 1526 ixgbe_enable_tx_laser(hw); 1527 1528 /* 1529 * Power on the PHY 1530 */ 1531 (void) ixgbe_set_phy_power(hw, B_TRUE); 1532 1533 /* 1534 * Save the state of the PHY 1535 */ 1536 ixgbe_get_hw_state(ixgbe); 1537 1538 /* 1539 * Make sure driver has control 1540 */ 1541 ixgbe_get_driver_control(hw); 1542 1543 return (IXGBE_SUCCESS); 1544 } 1545 1546 /* 1547 * ixgbe_chip_stop - Stop the chipset hardware 1548 */ 1549 static void 1550 ixgbe_chip_stop(ixgbe_t *ixgbe) 1551 { 1552 struct ixgbe_hw *hw = &ixgbe->hw; 1553 int rv; 1554 1555 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1556 1557 /* 1558 * Stop interupt generation and disable Tx unit 1559 */ 1560 hw->adapter_stopped = B_FALSE; 1561 (void) ixgbe_stop_adapter(hw); 1562 1563 /* 1564 * Reset the chipset 1565 */ 1566 (void) ixgbe_reset_hw(hw); 1567 1568 /* 1569 * Reset PHY 1570 */ 1571 (void) ixgbe_reset_phy(hw); 1572 1573 /* 1574 * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting 1575 * the PHY while doing so. Else, just power down the PHY. 1576 */ 1577 if (hw->phy.ops.enter_lplu != NULL) { 1578 hw->phy.reset_disable = B_TRUE; 1579 rv = hw->phy.ops.enter_lplu(hw); 1580 if (rv != IXGBE_SUCCESS) 1581 ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv); 1582 hw->phy.reset_disable = B_FALSE; 1583 } else { 1584 (void) ixgbe_set_phy_power(hw, B_FALSE); 1585 } 1586 1587 /* 1588 * Turn off any present SFP Tx laser 1589 * Expected for health and safety reasons 1590 */ 1591 ixgbe_disable_tx_laser(hw); 1592 1593 /* 1594 * Tell firmware driver is no longer in control 1595 */ 1596 ixgbe_release_driver_control(hw); 1597 1598 } 1599 1600 /* 1601 * ixgbe_reset - Reset the chipset and re-start the driver. 1602 * 1603 * It involves stopping and re-starting the chipset, 1604 * and re-configuring the rx/tx rings. 1605 */ 1606 static int 1607 ixgbe_reset(ixgbe_t *ixgbe) 1608 { 1609 int i; 1610 1611 /* 1612 * Disable and stop the watchdog timer 1613 */ 1614 ixgbe_disable_watchdog_timer(ixgbe); 1615 1616 mutex_enter(&ixgbe->gen_lock); 1617 1618 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED); 1619 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED); 1620 1621 ixgbe_stop(ixgbe, B_FALSE); 1622 1623 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1624 mutex_exit(&ixgbe->gen_lock); 1625 return (IXGBE_FAILURE); 1626 } 1627 1628 /* 1629 * After resetting, need to recheck the link status. 1630 */ 1631 ixgbe->link_check_complete = B_FALSE; 1632 ixgbe->link_check_hrtime = gethrtime() + 1633 (IXGBE_LINK_UP_TIME * 100000000ULL); 1634 1635 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED); 1636 1637 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) { 1638 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1639 mac_tx_ring_update(ixgbe->mac_hdl, 1640 ixgbe->tx_rings[i].ring_handle); 1641 } 1642 } 1643 1644 mutex_exit(&ixgbe->gen_lock); 1645 1646 /* 1647 * Enable and start the watchdog timer 1648 */ 1649 ixgbe_enable_watchdog_timer(ixgbe); 1650 1651 return (IXGBE_SUCCESS); 1652 } 1653 1654 /* 1655 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources. 

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	int i, j;

	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chance of being transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * reset the writeback head if it's enabled.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	boolean_t done;
	int i, j;

	/*
	 * Wait for a specific time to allow pending tx packets
	 * to be transmitted.
	 *
	 * Check the counter tbd_free to see if transmission is done.
	 * No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending packets have been transmitted;
	 * otherwise return B_FALSE.
	 */
	for (i = 0; i < TX_DRAIN_TIME; i++) {

		done = B_TRUE;
		for (j = 0; j < ixgbe->num_tx_rings; j++) {
			tx_ring = &ixgbe->tx_rings[j];
			done = done &&
			    (tx_ring->tbd_free == tx_ring->ring_size);
		}

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
	boolean_t done = B_TRUE;
	int i;

	/*
	 * Poll the rx free list to check whether the rx buffers held by
	 * the upper layer have been released.
	 *
	 * Check the counter rcb_pending to see if all pending buffers are
	 * released. No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending buffers have been released;
	 * otherwise return B_FALSE.
	 */
	for (i = 0; i < RX_DRAIN_TIME; i++) {
		done = (ixgbe->rcb_pending == 0);

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}
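
/*
 * Both drain routines above are bounded polls: at most TX_DRAIN_TIME
 * (or RX_DRAIN_TIME) iterations of msec_delay(1), i.e. a wait bounded
 * by the constants defined in ixgbe_sw.h rather than an indefinite
 * block.  A B_FALSE return from ixgbe_rx_drain() is what makes
 * ixgbe_detach() fail while rx buffers loaned to the upper layer are
 * still outstanding.
 */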

/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	/*
	 * Configure link now for X550
	 *
	 * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
	 * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
	 * the resting state of the link would be the maximum speed that
	 * autonegotiation will allow (usually 10Gb, infrastructure allowing),
	 * so we never bothered with explicitly setting the link to 10Gb as it
	 * would already be at that state on driver attach. With X550, we must
	 * trigger a re-negotiation of the link in order to switch from a LPLU
	 * 1Gb link to 10Gb (cable and link partner permitting.)
	 */
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		(void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
		ixgbe_get_hw_state(ixgbe);
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * ixgbe_start() will be called when resetting; however, if reset
	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
	 * before enabling the interrupts.
	 */
	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
	    | IXGBE_STALL | IXGBE_OVERTEMP));

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is STARTED
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
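
/*
 * Lock ordering note for ixgbe_start()/ixgbe_stop(): gen_lock is held
 * on entry, then every rx ring lock is taken in ascending index order,
 * then every tx ring lock, and all are dropped in the reverse order.
 * Keeping this hierarchy identical in both paths avoids deadlock with
 * the interrupt and transmit code that takes individual ring locks.
 */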

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}

/*
 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
 */
/* ARGSUSED */
static int
ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
    void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;

	switch (cbaction) {
	/* IRM callback */
	int count;
	case DDI_CB_INTR_ADD:
	case DDI_CB_INTR_REMOVE:
		count = (int)(uintptr_t)cbarg;
		ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
		DTRACE_PROBE2(ixgbe__irm__callback, int, count,
		    int, ixgbe->intr_cnt);
		if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
		    DDI_SUCCESS) {
			ixgbe_error(ixgbe,
			    "IRM CB: Failed to adjust interrupts");
			goto cb_fail;
		}
		break;
	default:
		IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
		    cbaction);
		return (DDI_ENOTSUP);
	}
	return (DDI_SUCCESS);
cb_fail:
	return (DDI_FAILURE);
}
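
/*
 * Background for the callback above: under Interrupt Resource
 * Management (IRM) the system may reclaim some of a driver's MSI-X
 * vectors, or offer additional ones, while the driver is running.
 * cbarg carries the number of vectors to add or remove, and
 * ixgbe_intr_adjust() below does the heavy lifting: quiesce the
 * interface, reallocate vectors, remap the rings, and restart.
 */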

/*
 * ixgbe_intr_adjust - Adjust interrupts to respond to an IRM request.
 */
static int
ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
{
	int i, rc, actual;

	if (count == 0)
		return (DDI_SUCCESS);

	if ((cbaction == DDI_CB_INTR_ADD &&
	    ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
	    (cbaction == DDI_CB_INTR_REMOVE &&
	    ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
		return (DDI_FAILURE);

	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		return (DDI_FAILURE);
	}

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);

	mutex_enter(&ixgbe->gen_lock);
	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
	ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);

	ixgbe_stop(ixgbe, B_FALSE);
	/*
	 * Disable interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		rc = ixgbe_disable_intrs(ixgbe);
		ASSERT(rc == IXGBE_SUCCESS);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Clear vect_map
	 */
	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
	switch (cbaction) {
	case DDI_CB_INTR_ADD:
		rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
		    DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
		    DDI_INTR_ALLOC_NORMAL);
		if (rc != DDI_SUCCESS || actual != count) {
			ixgbe_log(ixgbe, "Adjust interrupts failed."
			    "return: %d, irm cb size: %d, actual: %d",
			    rc, count, actual);
			goto intr_adjust_fail;
		}
		ixgbe->intr_cnt += count;
		break;

	case DDI_CB_INTR_REMOVE:
		for (i = ixgbe->intr_cnt - count;
		    i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_free(ixgbe->htable[i]);
			ixgbe->htable[i] = NULL;
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe, "Adjust interrupts failed."
2063 "return: %d, irm cb size: %d, actual: %d", 2064 rc, count, actual); 2065 goto intr_adjust_fail; 2066 } 2067 } 2068 ixgbe->intr_cnt -= count; 2069 break; 2070 } 2071 2072 /* 2073 * Get priority for first vector, assume remaining are all the same 2074 */ 2075 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 2076 if (rc != DDI_SUCCESS) { 2077 ixgbe_log(ixgbe, 2078 "Get interrupt priority failed: %d", rc); 2079 goto intr_adjust_fail; 2080 } 2081 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 2082 if (rc != DDI_SUCCESS) { 2083 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc); 2084 goto intr_adjust_fail; 2085 } 2086 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 2087 2088 /* 2089 * Map rings to interrupt vectors 2090 */ 2091 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 2092 ixgbe_error(ixgbe, 2093 "IRM CB: Failed to map interrupts to vectors"); 2094 goto intr_adjust_fail; 2095 } 2096 2097 /* 2098 * Add interrupt handlers 2099 */ 2100 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 2101 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers"); 2102 goto intr_adjust_fail; 2103 } 2104 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 2105 2106 /* 2107 * Now that mutex locks are initialized, and the chip is also 2108 * initialized, enable interrupts. 2109 */ 2110 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 2111 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts"); 2112 goto intr_adjust_fail; 2113 } 2114 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 2115 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 2116 ixgbe_error(ixgbe, "IRM CB: Failed to start"); 2117 goto intr_adjust_fail; 2118 } 2119 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST; 2120 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; 2121 ixgbe->ixgbe_state |= IXGBE_STARTED; 2122 mutex_exit(&ixgbe->gen_lock); 2123 2124 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2125 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, 2126 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]); 2127 } 2128 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2129 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, 2130 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]); 2131 } 2132 2133 /* Wakeup all Tx rings */ 2134 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2135 mac_tx_ring_update(ixgbe->mac_hdl, 2136 ixgbe->tx_rings[i].ring_handle); 2137 } 2138 2139 IXGBE_DEBUGLOG_3(ixgbe, 2140 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).", 2141 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max); 2142 return (DDI_SUCCESS); 2143 2144 intr_adjust_fail: 2145 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 2146 mutex_exit(&ixgbe->gen_lock); 2147 return (DDI_FAILURE); 2148 } 2149 2150 /* 2151 * ixgbe_intr_cb_register - Register interrupt callback function. 2152 */ 2153 static int 2154 ixgbe_intr_cb_register(ixgbe_t *ixgbe) 2155 { 2156 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc, 2157 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) { 2158 return (IXGBE_FAILURE); 2159 } 2160 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered."); 2161 return (IXGBE_SUCCESS); 2162 } 2163 2164 /* 2165 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings. 
2166 */ 2167 static int 2168 ixgbe_alloc_rings(ixgbe_t *ixgbe) 2169 { 2170 /* 2171 * Allocate memory space for rx rings 2172 */ 2173 ixgbe->rx_rings = kmem_zalloc( 2174 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, 2175 KM_NOSLEEP); 2176 2177 if (ixgbe->rx_rings == NULL) { 2178 return (IXGBE_FAILURE); 2179 } 2180 2181 /* 2182 * Allocate memory space for tx rings 2183 */ 2184 ixgbe->tx_rings = kmem_zalloc( 2185 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, 2186 KM_NOSLEEP); 2187 2188 if (ixgbe->tx_rings == NULL) { 2189 kmem_free(ixgbe->rx_rings, 2190 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2191 ixgbe->rx_rings = NULL; 2192 return (IXGBE_FAILURE); 2193 } 2194 2195 /* 2196 * Allocate memory space for rx ring groups 2197 */ 2198 ixgbe->rx_groups = kmem_zalloc( 2199 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, 2200 KM_NOSLEEP); 2201 2202 if (ixgbe->rx_groups == NULL) { 2203 kmem_free(ixgbe->rx_rings, 2204 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2205 kmem_free(ixgbe->tx_rings, 2206 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2207 ixgbe->rx_rings = NULL; 2208 ixgbe->tx_rings = NULL; 2209 return (IXGBE_FAILURE); 2210 } 2211 2212 return (IXGBE_SUCCESS); 2213 } 2214 2215 /* 2216 * ixgbe_free_rings - Free the memory space of rx/tx rings. 2217 */ 2218 static void 2219 ixgbe_free_rings(ixgbe_t *ixgbe) 2220 { 2221 if (ixgbe->rx_rings != NULL) { 2222 kmem_free(ixgbe->rx_rings, 2223 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2224 ixgbe->rx_rings = NULL; 2225 } 2226 2227 if (ixgbe->tx_rings != NULL) { 2228 kmem_free(ixgbe->tx_rings, 2229 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2230 ixgbe->tx_rings = NULL; 2231 } 2232 2233 if (ixgbe->rx_groups != NULL) { 2234 kmem_free(ixgbe->rx_groups, 2235 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); 2236 ixgbe->rx_groups = NULL; 2237 } 2238 } 2239 2240 static int 2241 ixgbe_alloc_rx_data(ixgbe_t *ixgbe) 2242 { 2243 ixgbe_rx_ring_t *rx_ring; 2244 int i; 2245 2246 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2247 rx_ring = &ixgbe->rx_rings[i]; 2248 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) 2249 goto alloc_rx_rings_failure; 2250 } 2251 return (IXGBE_SUCCESS); 2252 2253 alloc_rx_rings_failure: 2254 ixgbe_free_rx_data(ixgbe); 2255 return (IXGBE_FAILURE); 2256 } 2257 2258 static void 2259 ixgbe_free_rx_data(ixgbe_t *ixgbe) 2260 { 2261 ixgbe_rx_ring_t *rx_ring; 2262 ixgbe_rx_data_t *rx_data; 2263 int i; 2264 2265 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2266 rx_ring = &ixgbe->rx_rings[i]; 2267 2268 mutex_enter(&ixgbe->rx_pending_lock); 2269 rx_data = rx_ring->rx_data; 2270 2271 if (rx_data != NULL) { 2272 rx_data->flag |= IXGBE_RX_STOPPED; 2273 2274 if (rx_data->rcb_pending == 0) { 2275 ixgbe_free_rx_ring_data(rx_data); 2276 rx_ring->rx_data = NULL; 2277 } 2278 } 2279 2280 mutex_exit(&ixgbe->rx_pending_lock); 2281 } 2282 } 2283 2284 /* 2285 * ixgbe_setup_rings - Setup rx/tx rings. 2286 */ 2287 static void 2288 ixgbe_setup_rings(ixgbe_t *ixgbe) 2289 { 2290 /* 2291 * Setup the rx/tx rings, including the following: 2292 * 2293 * 1. Setup the descriptor ring and the control block buffers; 2294 * 2. Initialize necessary registers for receive/transmit; 2295 * 3. 
Initialize software pointers/parameters for receive/transmit; 2296 */ 2297 ixgbe_setup_rx(ixgbe); 2298 2299 ixgbe_setup_tx(ixgbe); 2300 } 2301 2302 static void 2303 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 2304 { 2305 ixgbe_t *ixgbe = rx_ring->ixgbe; 2306 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2307 struct ixgbe_hw *hw = &ixgbe->hw; 2308 rx_control_block_t *rcb; 2309 union ixgbe_adv_rx_desc *rbd; 2310 uint32_t size; 2311 uint32_t buf_low; 2312 uint32_t buf_high; 2313 uint32_t reg_val; 2314 int i; 2315 2316 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2317 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2318 2319 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2320 rcb = rx_data->work_list[i]; 2321 rbd = &rx_data->rbd_ring[i]; 2322 2323 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2324 rbd->read.hdr_addr = NULL; 2325 } 2326 2327 /* 2328 * Initialize the length register 2329 */ 2330 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2331 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2332 2333 /* 2334 * Initialize the base address registers 2335 */ 2336 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2337 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2338 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2339 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2340 2341 /* 2342 * Setup head & tail pointers 2343 */ 2344 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2345 rx_data->ring_size - 1); 2346 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2347 2348 rx_data->rbd_next = 0; 2349 rx_data->lro_first = 0; 2350 2351 /* 2352 * Setup the Receive Descriptor Control Register (RXDCTL) 2353 * PTHRESH=32 descriptors (half the internal cache) 2354 * HTHRESH=0 descriptors (to minimize latency on fetch) 2355 * WTHRESH defaults to 1 (writeback each descriptor) 2356 */ 2357 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2358 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2359 2360 /* Not a valid value for 82599, X540 or X550 */ 2361 if (hw->mac.type == ixgbe_mac_82598EB) { 2362 reg_val |= 0x0020; /* pthresh */ 2363 } 2364 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2365 2366 if (hw->mac.type == ixgbe_mac_82599EB || 2367 hw->mac.type == ixgbe_mac_X540 || 2368 hw->mac.type == ixgbe_mac_X550 || 2369 hw->mac.type == ixgbe_mac_X550EM_x) { 2370 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2371 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2372 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2373 } 2374 2375 /* 2376 * Setup the Split and Replication Receive Control Register. 2377 * Set the rx buffer size and the advanced descriptor type. 
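 *
 * The buffer size field is encoded in 1 KB units; for example, a 2 KB
 * rx buffer is written as (2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT),
 * i.e. 2.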
2378 */ 2379 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) | 2380 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2381 reg_val |= IXGBE_SRRCTL_DROP_EN; 2382 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val); 2383 } 2384 2385 static void 2386 ixgbe_setup_rx(ixgbe_t *ixgbe) 2387 { 2388 ixgbe_rx_ring_t *rx_ring; 2389 struct ixgbe_hw *hw = &ixgbe->hw; 2390 uint32_t reg_val; 2391 uint32_t ring_mapping; 2392 uint32_t i, index; 2393 uint32_t psrtype_rss_bit; 2394 2395 /* 2396 * Ensure that Rx is disabled while setting up 2397 * the Rx unit and Rx descriptor ring(s) 2398 */ 2399 ixgbe_disable_rx(hw); 2400 2401 /* PSRTYPE must be configured for 82599 */ 2402 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ && 2403 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) { 2404 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2405 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2406 reg_val |= IXGBE_PSRTYPE_L2HDR; 2407 reg_val |= 0x80000000; 2408 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val); 2409 } else { 2410 if (ixgbe->num_rx_groups > 32) { 2411 psrtype_rss_bit = 0x20000000; 2412 } else { 2413 psrtype_rss_bit = 0x40000000; 2414 } 2415 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) { 2416 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2417 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2418 reg_val |= IXGBE_PSRTYPE_L2HDR; 2419 reg_val |= psrtype_rss_bit; 2420 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val); 2421 } 2422 } 2423 2424 /* 2425 * Set filter control in FCTRL to determine which types of packets are 2426 * passed up to the driver. 2427 * - Pass broadcast packets. 2428 * - Do not pass flow control pause frames (82598-specific) 2429 */ 2430 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2431 reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */ 2432 if (hw->mac.type == ixgbe_mac_82598EB) { 2433 reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */ 2434 } 2435 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val); 2436 2437 /* 2438 * Hardware checksum settings 2439 */ 2440 if (ixgbe->rx_hcksum_enable) { 2441 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2442 reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */ 2443 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val); 2444 } 2445 2446 /* 2447 * Setup VMDq and RSS for multiple receive queues 2448 */ 2449 switch (ixgbe->classify_mode) { 2450 case IXGBE_CLASSIFY_RSS: 2451 /* 2452 * One group, only RSS is needed when more than 2453 * one ring is enabled. 2454 */ 2455 ixgbe_setup_rss(ixgbe); 2456 break; 2457 2458 case IXGBE_CLASSIFY_VMDQ: 2459 /* 2460 * Multiple groups, each group has one ring, 2461 * only VMDq is needed. 2462 */ 2463 ixgbe_setup_vmdq(ixgbe); 2464 break; 2465 2466 case IXGBE_CLASSIFY_VMDQ_RSS: 2467 /* 2468 * Multiple groups and multiple rings, both 2469 * VMDq and RSS are needed. 2470 */ 2471 ixgbe_setup_vmdq_rss(ixgbe); 2472 break; 2473 2474 default: 2475 break; 2476 } 2477 2478 /* 2479 * Enable the receive unit. This must be done after filter 2480 * control is set in FCTRL. On 82598, we disable the descriptor monitor. 2481 * 82598 is the only adapter which defines this RXCTRL option.
2482 */ 2483 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2484 if (hw->mac.type == ixgbe_mac_82598EB) 2485 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */ 2486 reg_val |= IXGBE_RXCTRL_RXEN; 2487 (void) ixgbe_enable_rx_dma(hw, reg_val); 2488 2489 /* 2490 * ixgbe_setup_rx_ring must be called after configuring RXCTRL 2491 */ 2492 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2493 rx_ring = &ixgbe->rx_rings[i]; 2494 ixgbe_setup_rx_ring(rx_ring); 2495 } 2496 2497 /* 2498 * Setup the per-ring statistics mapping. 2499 */ 2500 ring_mapping = 0; 2501 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2502 index = ixgbe->rx_rings[i].hw_index; 2503 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2)); 2504 ring_mapping |= (i & 0xF) << (8 * (index & 0x3)); 2505 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping); 2506 } 2507 2508 /* 2509 * The Max Frame Size in MHADD/MAXFRS will be internally increased 2510 * by four bytes if the packet has a VLAN field, so it includes the 2511 * MTU, ethernet header and frame check sequence. 2512 * The register is named MAXFRS on the 82599. 2513 */ 2514 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD); 2515 reg_val &= ~IXGBE_MHADD_MFS_MASK; 2516 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header) 2517 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT; 2518 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val); 2519 2520 /* 2521 * Setup Jumbo Frame enable bit 2522 */ 2523 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2524 if (ixgbe->default_mtu > ETHERMTU) 2525 reg_val |= IXGBE_HLREG0_JUMBOEN; 2526 else 2527 reg_val &= ~IXGBE_HLREG0_JUMBOEN; 2528 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2529 2530 /* 2531 * Setup RSC for multiple receive queues. 2532 */ 2533 if (ixgbe->lro_enable) { 2534 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2535 /* 2536 * Make sure rx_buf_size * MAXDESC is not greater 2537 * than 65535. 2538 * Intel recommends 4 for the MAXDESC field value.
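 *
 * For example, with 16 KB buffers 16384 * 4 = 65536 exceeds that
 * limit by one, which is why MAXDESC is dropped to 1 below; with
 * 2 KB buffers 2048 * 4 = 8192 is comfortably within range.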
2539 */ 2540 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i)); 2541 reg_val |= IXGBE_RSCCTL_RSCEN; 2542 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k) 2543 reg_val |= IXGBE_RSCCTL_MAXDESC_1; 2544 else 2545 reg_val |= IXGBE_RSCCTL_MAXDESC_4; 2546 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val); 2547 } 2548 2549 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU); 2550 reg_val |= IXGBE_RSCDBU_RSCACKDIS; 2551 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val); 2552 2553 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2554 reg_val |= IXGBE_RDRXCTL_RSCACKC; 2555 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX; 2556 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2557 2558 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2559 } 2560 } 2561 2562 static void 2563 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring) 2564 { 2565 ixgbe_t *ixgbe = tx_ring->ixgbe; 2566 struct ixgbe_hw *hw = &ixgbe->hw; 2567 uint32_t size; 2568 uint32_t buf_low; 2569 uint32_t buf_high; 2570 uint32_t reg_val; 2571 2572 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2573 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2574 2575 /* 2576 * Initialize the length register 2577 */ 2578 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc); 2579 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size); 2580 2581 /* 2582 * Initialize the base address registers 2583 */ 2584 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2585 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2586 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low); 2587 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high); 2588 2589 /* 2590 * Setup head & tail pointers 2591 */ 2592 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0); 2593 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0); 2594 2595 /* 2596 * Setup head write-back 2597 */ 2598 if (ixgbe->tx_head_wb_enable) { 2599 /* 2600 * The memory of the head write-back is allocated using 2601 * the extra tbd beyond the tail of the tbd ring. 2602 */ 2603 tx_ring->tbd_head_wb = (uint32_t *) 2604 ((uintptr_t)tx_ring->tbd_area.address + size); 2605 *tx_ring->tbd_head_wb = 0; 2606 2607 buf_low = (uint32_t) 2608 (tx_ring->tbd_area.dma_address + size); 2609 buf_high = (uint32_t) 2610 ((tx_ring->tbd_area.dma_address + size) >> 32); 2611 2612 /* Set the head write-back enable bit */ 2613 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2614 2615 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2616 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2617 2618 /* 2619 * Turn off relaxed ordering for head write back or it will 2620 * cause problems with the tx recycling 2621 */ 2622 2623 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ? 
2624 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) : 2625 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index)); 2626 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 2627 if (hw->mac.type == ixgbe_mac_82598EB) { 2628 IXGBE_WRITE_REG(hw, 2629 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val); 2630 } else { 2631 IXGBE_WRITE_REG(hw, 2632 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val); 2633 } 2634 } else { 2635 tx_ring->tbd_head_wb = NULL; 2636 } 2637 2638 tx_ring->tbd_head = 0; 2639 tx_ring->tbd_tail = 0; 2640 tx_ring->tbd_free = tx_ring->ring_size; 2641 2642 if (ixgbe->tx_ring_init == B_TRUE) { 2643 tx_ring->tcb_head = 0; 2644 tx_ring->tcb_tail = 0; 2645 tx_ring->tcb_free = tx_ring->free_list_size; 2646 } 2647 2648 /* 2649 * Initialize the s/w context structure 2650 */ 2651 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t)); 2652 } 2653 2654 static void 2655 ixgbe_setup_tx(ixgbe_t *ixgbe) 2656 { 2657 struct ixgbe_hw *hw = &ixgbe->hw; 2658 ixgbe_tx_ring_t *tx_ring; 2659 uint32_t reg_val; 2660 uint32_t ring_mapping; 2661 int i; 2662 2663 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2664 tx_ring = &ixgbe->tx_rings[i]; 2665 ixgbe_setup_tx_ring(tx_ring); 2666 } 2667 2668 /* 2669 * Setup the per-ring statistics mapping. 2670 */ 2671 ring_mapping = 0; 2672 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2673 ring_mapping |= (i & 0xF) << (8 * (i & 0x3)); 2674 if ((i & 0x3) == 0x3) { 2675 switch (hw->mac.type) { 2676 case ixgbe_mac_82598EB: 2677 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 2678 ring_mapping); 2679 break; 2680 2681 case ixgbe_mac_82599EB: 2682 case ixgbe_mac_X540: 2683 case ixgbe_mac_X550: 2684 case ixgbe_mac_X550EM_x: 2685 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 2686 ring_mapping); 2687 break; 2688 2689 default: 2690 break; 2691 } 2692 2693 ring_mapping = 0; 2694 } 2695 } 2696 if (i & 0x3) { 2697 switch (hw->mac.type) { 2698 case ixgbe_mac_82598EB: 2699 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping); 2700 break; 2701 2702 case ixgbe_mac_82599EB: 2703 case ixgbe_mac_X540: 2704 case ixgbe_mac_X550: 2705 case ixgbe_mac_X550EM_x: 2706 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping); 2707 break; 2708 2709 default: 2710 break; 2711 } 2712 } 2713 2714 /* 2715 * Enable CRC appending and TX padding (for short tx frames) 2716 */ 2717 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2718 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN; 2719 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2720 2721 /* 2722 * enable DMA for 82599, X540 and X550 parts 2723 */ 2724 if (hw->mac.type == ixgbe_mac_82599EB || 2725 hw->mac.type == ixgbe_mac_X540 || 2726 hw->mac.type == ixgbe_mac_X550 || 2727 hw->mac.type == ixgbe_mac_X550EM_x) { 2728 /* DMATXCTL.TE must be set after all Tx config is complete */ 2729 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2730 reg_val |= IXGBE_DMATXCTL_TE; 2731 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val); 2732 2733 /* Disable arbiter to set MTQC */ 2734 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2735 reg_val |= IXGBE_RTTDCS_ARBDIS; 2736 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2737 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2738 reg_val &= ~IXGBE_RTTDCS_ARBDIS; 2739 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2740 } 2741 2742 /* 2743 * Enabling tx queues .. 
2744 * For 82599, this must be done after DMATXCTL.TE is set 2745 */ 2746 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2747 tx_ring = &ixgbe->tx_rings[i]; 2748 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index)); 2749 reg_val |= IXGBE_TXDCTL_ENABLE; 2750 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val); 2751 } 2752 } 2753 2754 /* 2755 * ixgbe_setup_rss - Setup receive-side scaling feature. 2756 */ 2757 static void 2758 ixgbe_setup_rss(ixgbe_t *ixgbe) 2759 { 2760 struct ixgbe_hw *hw = &ixgbe->hw; 2761 uint32_t mrqc; 2762 2763 /* 2764 * Initialize RETA/ERETA table 2765 */ 2766 ixgbe_setup_rss_table(ixgbe); 2767 2768 /* 2769 * Enable RSS & perform hash on these packet types 2770 */ 2771 mrqc = IXGBE_MRQC_RSSEN | 2772 IXGBE_MRQC_RSS_FIELD_IPV4 | 2773 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2774 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2775 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2776 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2777 IXGBE_MRQC_RSS_FIELD_IPV6 | 2778 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2779 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2780 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2781 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2782 } 2783 2784 /* 2785 * ixgbe_setup_vmdq - Setup MAC classification feature 2786 */ 2787 static void 2788 ixgbe_setup_vmdq(ixgbe_t *ixgbe) 2789 { 2790 struct ixgbe_hw *hw = &ixgbe->hw; 2791 uint32_t vmdctl, i, vtctl; 2792 2793 /* 2794 * Setup the VMDq Control register; enable VMDq based on the 2795 * packet's destination MAC address. 2796 */ 2797 switch (hw->mac.type) { 2798 case ixgbe_mac_82598EB: 2799 /* 2800 * VMDq Enable = 1; 2801 * VMDq Filter = 0; MAC filtering 2802 * Default VMDq output index = 0; 2803 */ 2804 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2805 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2806 break; 2807 2808 case ixgbe_mac_82599EB: 2809 case ixgbe_mac_X540: 2810 case ixgbe_mac_X550: 2811 case ixgbe_mac_X550EM_x: 2812 /* 2813 * Enable VMDq-only. 2814 */ 2815 vmdctl = IXGBE_MRQC_VMDQEN; 2816 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl); 2817 2818 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2819 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2820 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2821 } 2822 2823 /* 2824 * Enable Virtualization and Replication. 2825 */ 2826 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2827 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2828 2829 /* 2830 * Enable receiving packets to all VFs 2831 */ 2832 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2833 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2834 break; 2835 2836 default: 2837 break; 2838 } 2839 } 2840 2841 /* 2842 * ixgbe_setup_vmdq_rss - Setup both the vmdq and rss features.
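 *
 * In this mode the VMDq pool selected by the destination MAC address
 * and the RSS hash together pick the destination queue. A rough model
 * of the selection is sketched below; the exact queue numbering is
 * chip-specific (see ixgbe_get_hw_rx_index()), so this is an
 * illustration only.
 */

#if 0	/* illustrative sketch, not compiled */
static uint32_t
example_vmdq_rss_queue(uint32_t pool, uint32_t rss_index,
    uint32_t ring_per_group)
{
	/* Simplified model: each pool owns ring_per_group queues. */
	return (pool * ring_per_group + (rss_index % ring_per_group));
}
#endif

/*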
2843 */ 2844 static void 2845 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe) 2846 { 2847 struct ixgbe_hw *hw = &ixgbe->hw; 2848 uint32_t i, mrqc; 2849 uint32_t vtctl, vmdctl; 2850 2851 /* 2852 * Initialize RETA/ERETA table 2853 */ 2854 ixgbe_setup_rss_table(ixgbe); 2855 2856 /* 2857 * Enable and setup RSS and VMDq 2858 */ 2859 switch (hw->mac.type) { 2860 case ixgbe_mac_82598EB: 2861 /* 2862 * Enable RSS & Setup RSS Hash functions 2863 */ 2864 mrqc = IXGBE_MRQC_RSSEN | 2865 IXGBE_MRQC_RSS_FIELD_IPV4 | 2866 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2867 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2868 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2869 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2870 IXGBE_MRQC_RSS_FIELD_IPV6 | 2871 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2872 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2873 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2874 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2875 2876 /* 2877 * Enable and Setup VMDq 2878 * VMDq Filter = 0; MAC filtering 2879 * Default VMDq output index = 0; 2880 */ 2881 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2882 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2883 break; 2884 2885 case ixgbe_mac_82599EB: 2886 case ixgbe_mac_X540: 2887 case ixgbe_mac_X550: 2888 case ixgbe_mac_X550EM_x: 2889 /* 2890 * Enable RSS & Setup RSS Hash functions 2891 */ 2892 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 | 2893 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2894 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2895 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2896 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2897 IXGBE_MRQC_RSS_FIELD_IPV6 | 2898 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2899 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2900 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2901 2902 /* 2903 * Enable VMDq+RSS. 2904 */ 2905 if (ixgbe->num_rx_groups > 32) { 2906 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN; 2907 } else { 2908 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN; 2909 } 2910 2911 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2912 2913 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2914 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2915 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2916 } 2917 break; 2918 2919 default: 2920 break; 2921 2922 } 2923 2924 if (hw->mac.type == ixgbe_mac_82599EB || 2925 hw->mac.type == ixgbe_mac_X540 || 2926 hw->mac.type == ixgbe_mac_X550 || 2927 hw->mac.type == ixgbe_mac_X550EM_x) { 2928 /* 2929 * Enable Virtualization and Replication. 2930 */ 2931 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2932 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2933 2934 /* 2935 * Enable receiving packets to all VFs 2936 */ 2937 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2938 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2939 } 2940 } 2941 2942 /* 2943 * ixgbe_setup_rss_table - Setup RSS table 2944 */ 2945 static void 2946 ixgbe_setup_rss_table(ixgbe_t *ixgbe) 2947 { 2948 struct ixgbe_hw *hw = &ixgbe->hw; 2949 uint32_t i, j; 2950 uint32_t random; 2951 uint32_t reta; 2952 uint32_t ring_per_group; 2953 uint32_t ring; 2954 uint32_t table_size; 2955 uint32_t index_mult; 2956 uint32_t rxcsum; 2957 2958 /* 2959 * Set multiplier for RETA setup and table size based on MAC type. 2960 * RETA table sizes vary by model: 2961 * 2962 * 82598, 82599, X540: 128 table entries. 2963 * X550: 512 table entries. 2964 */ 2965 index_mult = 0x1; 2966 table_size = 128; 2967 switch (ixgbe->hw.mac.type) { 2968 case ixgbe_mac_82598EB: 2969 index_mult = 0x11; 2970 break; 2971 case ixgbe_mac_X550: 2972 case ixgbe_mac_X550EM_x: 2973 table_size = 512; 2974 break; 2975 default: 2976 break; 2977 } 2978 2979 /* 2980 * Fill out the RSS redirection table. The configuration of the indices is 2981 * hardware-dependent.
2982 * 2983 * 82598: 8 bits wide containing two 4 bit RSS indices 2984 * 82599, X540: 8 bits wide containing one 4 bit RSS index 2985 * X550: 8 bits wide containing one 6 bit RSS index 2986 */ 2987 reta = 0; 2988 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2989 2990 for (i = 0, j = 0; i < table_size; i++, j++) { 2991 if (j == ring_per_group) j = 0; 2992 2993 /* 2994 * The low 8 bits are for hash value (n+0); 2995 * The next 8 bits are for hash value (n+1), etc. 2996 */ 2997 ring = (j * index_mult); 2998 reta = reta >> 8; 2999 reta = reta | (((uint32_t)ring) << 24); 3000 3001 if ((i & 3) == 3) { 3002 /* 3003 * The first 128 table entries are programmed into the 3004 * RETA register, with any beyond that (e.g., on X550) 3005 * into ERETA. 3006 */ 3007 if (i < 128) 3008 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 3009 else 3010 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 3011 reta); 3012 reta = 0; 3013 } 3014 } 3015 3016 /* 3017 * Fill out the hash function seeds with random values 3018 */ 3019 for (i = 0; i < 10; i++) { 3020 (void) random_get_pseudo_bytes((uint8_t *)&random, 3021 sizeof (uint32_t)); 3022 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 3023 } 3024 3025 /* 3026 * Disable Packet Checksum to enable RSS for multiple receive queues. 3027 * It is an adapter hardware limitation that Packet Checksum is 3028 * mutually exclusive with RSS. 3029 */ 3030 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3031 rxcsum |= IXGBE_RXCSUM_PCSD; 3032 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 3033 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3034 } 3035 3036 /* 3037 * ixgbe_init_unicst - Initialize the unicast addresses. 3038 */ 3039 static void 3040 ixgbe_init_unicst(ixgbe_t *ixgbe) 3041 { 3042 struct ixgbe_hw *hw = &ixgbe->hw; 3043 uint8_t *mac_addr; 3044 int slot; 3045 /* 3046 * Here we should consider two situations: 3047 * 3048 * 1. The chipset is initialized for the first time: 3049 * Clear all the multiple unicast addresses. 3050 * 3051 * 2. The chipset is reset: 3052 * Recover the multiple unicast addresses from the 3053 * software data structure to the RAR registers.
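 *
 * As an illustration of how these structures cooperate, a minimal
 * sketch of installing a new unicast address follows (hypothetical
 * helper, assuming the caller holds gen_lock):
 */

#if 0	/* illustrative sketch, not compiled */
static int
example_unicst_add(ixgbe_t *ixgbe, const uint8_t *addr, uint32_t group)
{
	int slot;

	if (ixgbe->unicst_avail == 0)
		return (ENOSPC);
	if (ixgbe_unicst_find(ixgbe, addr) != -1)
		return (EEXIST);

	/* Find a free RAR slot and program it. */
	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
		if (ixgbe->unicst_addr[slot].mac.set == 0)
			break;
	}
	bcopy(addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	ixgbe->unicst_addr[slot].mac.set = 1;
	ixgbe->unicst_addr[slot].mac.group_index = group;
	ixgbe->unicst_avail--;
	(void) ixgbe_set_rar(&ixgbe->hw, slot,
	    ixgbe->unicst_addr[slot].mac.addr, group, IXGBE_RAH_AV);

	return (0);
}
#endif

/*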
3054 */ 3055 if (!ixgbe->unicst_init) { 3056 /* 3057 * Initialize the multiple unicast addresses 3058 */ 3059 ixgbe->unicst_total = hw->mac.num_rar_entries; 3060 ixgbe->unicst_avail = ixgbe->unicst_total; 3061 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3062 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3063 bzero(mac_addr, ETHERADDRL); 3064 (void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0); 3065 ixgbe->unicst_addr[slot].mac.set = 0; 3066 } 3067 ixgbe->unicst_init = B_TRUE; 3068 } else { 3069 /* Re-configure the RAR registers */ 3070 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3071 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3072 if (ixgbe->unicst_addr[slot].mac.set == 1) { 3073 (void) ixgbe_set_rar(hw, slot, mac_addr, 3074 ixgbe->unicst_addr[slot].mac.group_index, 3075 IXGBE_RAH_AV); 3076 } else { 3077 bzero(mac_addr, ETHERADDRL); 3078 (void) ixgbe_set_rar(hw, slot, mac_addr, 3079 0, 0); 3080 } 3081 } 3082 } 3083 } 3084 3085 /* 3086 * ixgbe_unicst_find - Find the slot for the specified unicast address 3087 */ 3088 int 3089 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr) 3090 { 3091 int slot; 3092 3093 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3094 3095 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3096 if (bcmp(ixgbe->unicst_addr[slot].mac.addr, 3097 mac_addr, ETHERADDRL) == 0) 3098 return (slot); 3099 } 3100 3101 return (-1); 3102 } 3103 3104 /* 3105 * ixgbe_multicst_add - Add a multicast address. 3106 */ 3107 int 3108 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3109 { 3110 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3111 3112 if ((multiaddr[0] & 01) == 0) { 3113 return (EINVAL); 3114 } 3115 3116 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { 3117 return (ENOENT); 3118 } 3119 3120 bcopy(multiaddr, 3121 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL); 3122 ixgbe->mcast_count++; 3123 3124 /* 3125 * Update the multicast table in the hardware 3126 */ 3127 ixgbe_setup_multicst(ixgbe); 3128 3129 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3130 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3131 return (EIO); 3132 } 3133 3134 return (0); 3135 } 3136 3137 /* 3138 * ixgbe_multicst_remove - Remove a multicast address. 3139 */ 3140 int 3141 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3142 { 3143 int i; 3144 3145 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3146 3147 for (i = 0; i < ixgbe->mcast_count; i++) { 3148 if (bcmp(multiaddr, &ixgbe->mcast_table[i], 3149 ETHERADDRL) == 0) { 3150 for (i++; i < ixgbe->mcast_count; i++) { 3151 ixgbe->mcast_table[i - 1] = 3152 ixgbe->mcast_table[i]; 3153 } 3154 ixgbe->mcast_count--; 3155 break; 3156 } 3157 } 3158 3159 /* 3160 * Update the multicast table in the hardware 3161 */ 3162 ixgbe_setup_multicst(ixgbe); 3163 3164 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3165 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3166 return (EIO); 3167 } 3168 3169 return (0); 3170 } 3171 3172 /* 3173 * ixgbe_setup_multicst - Setup multicast data structures. 3174 * 3175 * This routine initializes all of the multicast related structures 3176 * and saves them in the hardware registers.
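 *
 * ixgbe_update_mc_addr_list() (used below) walks the table through a
 * caller-supplied iterator. A sketch of what such an iterator
 * typically does (the real one is ixgbe_mc_table_itr()):
 */

#if 0	/* illustrative sketch, not compiled */
static uint8_t *
example_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
{
	uint8_t *addr = *upd_ptr;

	_NOTE(ARGUNUSED(hw, vmdq));

	/* Hand back the current entry and step to the next one. */
	*upd_ptr = addr + ETHERADDRL;
	return (addr);
}
#endif

/*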
3177 */ 3178 static void 3179 ixgbe_setup_multicst(ixgbe_t *ixgbe) 3180 { 3181 uint8_t *mc_addr_list; 3182 uint32_t mc_addr_count; 3183 struct ixgbe_hw *hw = &ixgbe->hw; 3184 3185 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3186 3187 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES); 3188 3189 mc_addr_list = (uint8_t *)ixgbe->mcast_table; 3190 mc_addr_count = ixgbe->mcast_count; 3191 3192 /* 3193 * Update the multicast addresses to the MTA registers 3194 */ 3195 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 3196 ixgbe_mc_table_itr, TRUE); 3197 } 3198 3199 /* 3200 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode). 3201 * 3202 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers. 3203 * Different chipsets may have different allowed configuration of vmdq and rss. 3204 */ 3205 static void 3206 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe) 3207 { 3208 struct ixgbe_hw *hw = &ixgbe->hw; 3209 uint32_t ring_per_group; 3210 3211 switch (hw->mac.type) { 3212 case ixgbe_mac_82598EB: 3213 /* 3214 * 82598 supports the following combination: 3215 * vmdq no. x rss no. 3216 * [5..16] x 1 3217 * [1..4] x [1..16] 3218 * However, 8 rss queues per pool (vmdq) are sufficient for 3219 * most cases. 3220 */ 3221 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3222 if (ixgbe->num_rx_groups > 4) { 3223 ixgbe->num_rx_rings = ixgbe->num_rx_groups; 3224 } else { 3225 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3226 min(8, ring_per_group); 3227 } 3228 3229 break; 3230 3231 case ixgbe_mac_82599EB: 3232 case ixgbe_mac_X540: 3233 case ixgbe_mac_X550: 3234 case ixgbe_mac_X550EM_x: 3235 /* 3236 * 82599 supports the following combination: 3237 * vmdq no. x rss no. 3238 * [33..64] x [1..2] 3239 * [2..32] x [1..4] 3240 * 1 x [1..16] 3241 * However, 8 rss queues per pool (vmdq) are sufficient for 3242 * most cases. 3243 * 3244 * For now, treat X540 and X550 like the 82599. 3245 */ 3246 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3247 if (ixgbe->num_rx_groups == 1) { 3248 ixgbe->num_rx_rings = min(8, ring_per_group); 3249 } else if (ixgbe->num_rx_groups <= 32) { 3250 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3251 min(4, ring_per_group); 3252 } else if (ixgbe->num_rx_groups <= 64) { 3253 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3254 min(2, ring_per_group); 3255 } 3256 break; 3257 3258 default: 3259 break; 3260 } 3261 3262 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3263 3264 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) { 3265 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3266 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) { 3267 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ; 3268 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) { 3269 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS; 3270 } else { 3271 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS; 3272 } 3273 3274 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d", 3275 ixgbe->num_rx_groups, ixgbe->num_rx_rings); 3276 } 3277 3278 /* 3279 * ixgbe_get_conf - Get driver configurations set in driver.conf. 3280 * 3281 * This routine gets user-configured values out of the configuration 3282 * file ixgbe.conf. 3283 * 3284 * For each configurable value, there is a minimum, a maximum, and a 3285 * default. 3286 * If the user does not configure a value, use the default. 3287 * If the user configures below the minimum, use the minimum. 3288 * If the user configures above the maximum, use the maximum.
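 *
 * For example (assuming the standard property name strings from
 * ixgbe_sw.h), a driver.conf entry such as:
 *
 *	default_mtu = 9000;
 *	tx_queue_number = 8;
 *
 * requests jumbo frames and eight tx rings, subject to the clamping
 * described above.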
3289 */ 3290 static void 3291 ixgbe_get_conf(ixgbe_t *ixgbe) 3292 { 3293 struct ixgbe_hw *hw = &ixgbe->hw; 3294 uint32_t flow_control; 3295 3296 /* 3297 * ixgbe driver supports the following user configurations: 3298 * 3299 * Jumbo frame configuration: 3300 * default_mtu 3301 * 3302 * Ethernet flow control configuration: 3303 * flow_control 3304 * 3305 * Multiple rings configurations: 3306 * tx_queue_number 3307 * tx_ring_size 3308 * rx_queue_number 3309 * rx_ring_size 3310 * 3311 * Call ixgbe_get_prop() to get the value for a specific 3312 * configuration parameter. 3313 */ 3314 3315 /* 3316 * Jumbo frame configuration - max_frame_size controls host buffer 3317 * allocation, so it includes the MTU, ethernet header, vlan tag and 3318 * frame check sequence. 3319 */ 3320 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, 3321 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU); 3322 3323 ixgbe->max_frame_size = ixgbe->default_mtu + 3324 sizeof (struct ether_vlan_header) + ETHERFCSL; 3325 3326 /* 3327 * Ethernet flow control configuration 3328 */ 3329 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL, 3330 ixgbe_fc_none, 3, ixgbe_fc_none); 3331 if (flow_control == 3) 3332 flow_control = ixgbe_fc_default; 3333 3334 /* 3335 * fc.requested_mode is what the user requests. After autoneg, 3336 * fc.current_mode will be the flow_control mode that was negotiated. 3337 */ 3338 hw->fc.requested_mode = flow_control; 3339 3340 /* 3341 * Multiple rings configurations 3342 */ 3343 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM, 3344 ixgbe->capab->min_tx_que_num, 3345 ixgbe->capab->max_tx_que_num, 3346 ixgbe->capab->def_tx_que_num); 3347 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE, 3348 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 3349 3350 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM, 3351 ixgbe->capab->min_rx_que_num, 3352 ixgbe->capab->max_rx_que_num, 3353 ixgbe->capab->def_rx_que_num); 3354 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE, 3355 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 3356 3357 /* 3358 * Multiple groups configuration 3359 */ 3360 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM, 3361 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num, 3362 ixgbe->capab->def_rx_grp_num); 3363 3364 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE, 3365 0, 1, DEFAULT_MR_ENABLE); 3366 3367 if (ixgbe->mr_enable == B_FALSE) { 3368 ixgbe->num_tx_rings = 1; 3369 ixgbe->num_rx_rings = 1; 3370 ixgbe->num_rx_groups = 1; 3371 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3372 } else { 3373 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3374 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1); 3375 /* 3376 * The combination of num_rx_rings and num_rx_groups 3377 * may not be supported by the h/w. We need to adjust 3378 * them to appropriate values. 3379 */ 3380 ixgbe_setup_vmdq_rss_conf(ixgbe); 3381 } 3382 3383 /* 3384 * Tunable used to force an interrupt type. The only use is 3385 * for testing the lesser interrupt types.
3386 * 0 = don't force interrupt type 3387 * 1 = force interrupt type MSI-X 3388 * 2 = force interrupt type MSI 3389 * 3 = force interrupt type Legacy 3390 */ 3391 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE, 3392 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE); 3393 3394 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE, 3395 0, 1, DEFAULT_TX_HCKSUM_ENABLE); 3396 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE, 3397 0, 1, DEFAULT_RX_HCKSUM_ENABLE); 3398 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE, 3399 0, 1, DEFAULT_LSO_ENABLE); 3400 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE, 3401 0, 1, DEFAULT_LRO_ENABLE); 3402 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE, 3403 0, 1, DEFAULT_TX_HEAD_WB_ENABLE); 3404 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe, 3405 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE); 3406 3407 /* Head Write Back not recommended for 82599, X540 and X550 */ 3408 if (hw->mac.type == ixgbe_mac_82599EB || 3409 hw->mac.type == ixgbe_mac_X540 || 3410 hw->mac.type == ixgbe_mac_X550 || 3411 hw->mac.type == ixgbe_mac_X550EM_x) { 3412 ixgbe->tx_head_wb_enable = B_FALSE; 3413 } 3414 3415 /* 3416 * ixgbe LSO needs tx h/w checksum support. 3417 * LSO will be disabled if tx h/w checksum is not 3418 * enabled. 3419 */ 3420 if (ixgbe->tx_hcksum_enable == B_FALSE) { 3421 ixgbe->lso_enable = B_FALSE; 3422 } 3423 3424 /* 3425 * ixgbe LRO needs rx h/w checksum support. 3426 * LRO will be disabled if rx h/w checksum is not 3427 * enabled. 3428 */ 3429 if (ixgbe->rx_hcksum_enable == B_FALSE) { 3430 ixgbe->lro_enable = B_FALSE; 3431 } 3432 3433 /* 3434 * ixgbe LRO is only supported by 82599, X540 and X550 3435 */ 3436 if (hw->mac.type == ixgbe_mac_82598EB) { 3437 ixgbe->lro_enable = B_FALSE; 3438 } 3439 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD, 3440 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 3441 DEFAULT_TX_COPY_THRESHOLD); 3442 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe, 3443 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD, 3444 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD); 3445 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe, 3446 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD, 3447 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD); 3448 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe, 3449 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD, 3450 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD); 3451 3452 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD, 3453 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 3454 DEFAULT_RX_COPY_THRESHOLD); 3455 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR, 3456 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 3457 DEFAULT_RX_LIMIT_PER_INTR); 3458 3459 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING, 3460 ixgbe->capab->min_intr_throttle, 3461 ixgbe->capab->max_intr_throttle, 3462 ixgbe->capab->def_intr_throttle); 3463 /* 3464 * 82599, X540 and X550 require that the interrupt throttling rate 3465 * be a multiple of 8. This is enforced by the register definition.
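 *
 * For example, a configured value of 203 (0xCB) is silently rounded
 * down to 200, since 0xCB & 0xFF8 == 0xC8.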
3466 */ 3467 if (hw->mac.type == ixgbe_mac_82599EB || 3468 hw->mac.type == ixgbe_mac_X540 || 3469 hw->mac.type == ixgbe_mac_X550 || 3470 hw->mac.type == ixgbe_mac_X550EM_x) 3471 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8; 3472 3473 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe, 3474 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP); 3475 } 3476 3477 static void 3478 ixgbe_init_params(ixgbe_t *ixgbe) 3479 { 3480 struct ixgbe_hw *hw = &ixgbe->hw; 3481 ixgbe_link_speed speeds_supported = 0; 3482 boolean_t negotiate; 3483 3484 /* 3485 * Get a list of speeds the adapter supports. If the hw struct hasn't 3486 * been populated with this information yet, retrieve it from the 3487 * adapter and save it to our own variable. 3488 * 3489 * On certain adapters, such as ones which use SFPs, the contents of 3490 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not 3491 * updated, so we must rely on calling ixgbe_get_link_capabilities() 3492 * in order to ascertain the speeds which we are capable of supporting, 3493 * and in the case of SFP-equipped adapters, which speed we are 3494 * advertising. If ixgbe_get_link_capabilities() fails for some reason, 3495 * we'll go with a default list of speeds as a last resort. 3496 */ 3497 speeds_supported = hw->phy.speeds_supported; 3498 3499 if (speeds_supported == 0) { 3500 if (ixgbe_get_link_capabilities(hw, &speeds_supported, 3501 &negotiate) != IXGBE_SUCCESS) { 3502 if (hw->mac.type == ixgbe_mac_82598EB) { 3503 speeds_supported = 3504 IXGBE_LINK_SPEED_82598_AUTONEG; 3505 } else { 3506 speeds_supported = 3507 IXGBE_LINK_SPEED_82599_AUTONEG; 3508 } 3509 } 3510 } 3511 ixgbe->speeds_supported = speeds_supported; 3512 3513 /* 3514 * By default, all supported speeds are enabled and advertised. 
3515 */ 3516 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) { 3517 ixgbe->param_en_10000fdx_cap = 1; 3518 ixgbe->param_adv_10000fdx_cap = 1; 3519 } else { 3520 ixgbe->param_en_10000fdx_cap = 0; 3521 ixgbe->param_adv_10000fdx_cap = 0; 3522 } 3523 3524 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) { 3525 ixgbe->param_en_5000fdx_cap = 1; 3526 ixgbe->param_adv_5000fdx_cap = 1; 3527 } else { 3528 ixgbe->param_en_5000fdx_cap = 0; 3529 ixgbe->param_adv_5000fdx_cap = 0; 3530 } 3531 3532 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) { 3533 ixgbe->param_en_2500fdx_cap = 1; 3534 ixgbe->param_adv_2500fdx_cap = 1; 3535 } else { 3536 ixgbe->param_en_2500fdx_cap = 0; 3537 ixgbe->param_adv_2500fdx_cap = 0; 3538 } 3539 3540 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) { 3541 ixgbe->param_en_1000fdx_cap = 1; 3542 ixgbe->param_adv_1000fdx_cap = 1; 3543 } else { 3544 ixgbe->param_en_1000fdx_cap = 0; 3545 ixgbe->param_adv_1000fdx_cap = 0; 3546 } 3547 3548 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) { 3549 ixgbe->param_en_100fdx_cap = 1; 3550 ixgbe->param_adv_100fdx_cap = 1; 3551 } else { 3552 ixgbe->param_en_100fdx_cap = 0; 3553 ixgbe->param_adv_100fdx_cap = 0; 3554 } 3555 3556 ixgbe->param_pause_cap = 1; 3557 ixgbe->param_asym_pause_cap = 1; 3558 ixgbe->param_rem_fault = 0; 3559 3560 ixgbe->param_adv_autoneg_cap = 1; 3561 ixgbe->param_adv_pause_cap = 1; 3562 ixgbe->param_adv_asym_pause_cap = 1; 3563 ixgbe->param_adv_rem_fault = 0; 3564 3565 ixgbe->param_lp_10000fdx_cap = 0; 3566 ixgbe->param_lp_5000fdx_cap = 0; 3567 ixgbe->param_lp_2500fdx_cap = 0; 3568 ixgbe->param_lp_1000fdx_cap = 0; 3569 ixgbe->param_lp_100fdx_cap = 0; 3570 ixgbe->param_lp_autoneg_cap = 0; 3571 ixgbe->param_lp_pause_cap = 0; 3572 ixgbe->param_lp_asym_pause_cap = 0; 3573 ixgbe->param_lp_rem_fault = 0; 3574 } 3575 3576 /* 3577 * ixgbe_get_prop - Get a property value out of the configuration file 3578 * ixgbe.conf. 3579 * 3580 * Caller provides the name of the property, a default value, a minimum 3581 * value, and a maximum value. 3582 * 3583 * Return the configured value of the property, with default, minimum and 3584 * maximum properly applied. 3585 */ 3586 static int 3587 ixgbe_get_prop(ixgbe_t *ixgbe, 3588 char *propname, /* name of the property */ 3589 int minval, /* minimum acceptable value */ 3590 int maxval, /* maximum acceptable value */ 3591 int defval) /* default value */ 3592 { 3593 int value; 3594 3595 /* 3596 * Call ddi_prop_get_int() to read the conf settings 3597 */ 3598 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip, 3599 DDI_PROP_DONTPASS, propname, defval); 3600 if (value > maxval) 3601 value = maxval; 3602 3603 if (value < minval) 3604 value = minval; 3605 3606 return (value); 3607 } 3608 3609 /* 3610 * ixgbe_driver_setup_link - Use the link properties to set up the link. 3611 */ 3612 int 3613 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 3614 { 3615 struct ixgbe_hw *hw = &ixgbe->hw; 3616 ixgbe_link_speed advertised = 0; 3617 3618 /* 3619 * Assemble a list of enabled speeds to auto-negotiate with.
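 *
 * For example, if only param_en_10000fdx_cap and param_en_1000fdx_cap
 * are set, the result is
 * (IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL).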
3620 */ 3621 if (ixgbe->param_en_10000fdx_cap == 1) 3622 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 3623 3624 if (ixgbe->param_en_5000fdx_cap == 1) 3625 advertised |= IXGBE_LINK_SPEED_5GB_FULL; 3626 3627 if (ixgbe->param_en_2500fdx_cap == 1) 3628 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; 3629 3630 if (ixgbe->param_en_1000fdx_cap == 1) 3631 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 3632 3633 if (ixgbe->param_en_100fdx_cap == 1) 3634 advertised |= IXGBE_LINK_SPEED_100_FULL; 3635 3636 /* 3637 * As a last resort, autoneg with a default list of speeds. 3638 */ 3639 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) { 3640 ixgbe_notice(ixgbe, "Invalid link settings. Setting link " 3641 "to autonegotiate with full capabilities."); 3642 3643 if (hw->mac.type == ixgbe_mac_82598EB) 3644 advertised = IXGBE_LINK_SPEED_82598_AUTONEG; 3645 else 3646 advertised = IXGBE_LINK_SPEED_82599_AUTONEG; 3647 } 3648 3649 if (setup_hw) { 3650 if (ixgbe_setup_link(&ixgbe->hw, advertised, 3651 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) { 3652 ixgbe_notice(ixgbe, "Setup link failed on this " 3653 "device."); 3654 return (IXGBE_FAILURE); 3655 } 3656 } 3657 3658 return (IXGBE_SUCCESS); 3659 } 3660 3661 /* 3662 * ixgbe_driver_link_check - Link status processing. 3663 * 3664 * This function can be called in both kernel context and interrupt context 3665 */ 3666 static void 3667 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3668 { 3669 struct ixgbe_hw *hw = &ixgbe->hw; 3670 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3671 boolean_t link_up = B_FALSE; 3672 boolean_t link_changed = B_FALSE; 3673 3674 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3675 3676 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3677 if (link_up) { 3678 ixgbe->link_check_complete = B_TRUE; 3679 3680 /* Link is up, enable flow control settings */ 3681 (void) ixgbe_fc_enable(hw); 3682 3683 /* 3684 * The Link is up, check whether it was marked as down earlier 3685 */ 3686 if (ixgbe->link_state != LINK_STATE_UP) { 3687 switch (speed) { 3688 case IXGBE_LINK_SPEED_10GB_FULL: 3689 ixgbe->link_speed = SPEED_10GB; 3690 break; 3691 case IXGBE_LINK_SPEED_5GB_FULL: 3692 ixgbe->link_speed = SPEED_5GB; 3693 break; 3694 case IXGBE_LINK_SPEED_2_5GB_FULL: 3695 ixgbe->link_speed = SPEED_2_5GB; 3696 break; 3697 case IXGBE_LINK_SPEED_1GB_FULL: 3698 ixgbe->link_speed = SPEED_1GB; 3699 break; 3700 case IXGBE_LINK_SPEED_100_FULL: 3701 ixgbe->link_speed = SPEED_100; 3702 } 3703 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3704 ixgbe->link_state = LINK_STATE_UP; 3705 link_changed = B_TRUE; 3706 } 3707 } else { 3708 if (ixgbe->link_check_complete == B_TRUE || 3709 (ixgbe->link_check_complete == B_FALSE && 3710 gethrtime() >= ixgbe->link_check_hrtime)) { 3711 /* 3712 * The link is really down 3713 */ 3714 ixgbe->link_check_complete = B_TRUE; 3715 3716 if (ixgbe->link_state != LINK_STATE_DOWN) { 3717 ixgbe->link_speed = 0; 3718 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3719 ixgbe->link_state = LINK_STATE_DOWN; 3720 link_changed = B_TRUE; 3721 } 3722 } 3723 } 3724 3725 /* 3726 * If we are in an interrupt context, need to re-enable the 3727 * interrupt, which was automasked 3728 */ 3729 if (servicing_interrupt() != 0) { 3730 ixgbe->eims |= IXGBE_EICR_LSC; 3731 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3732 } 3733 3734 if (link_changed) { 3735 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3736 } 3737 } 3738 3739 /* 3740 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599. 
3741 */ 3742 static void 3743 ixgbe_sfp_check(void *arg) 3744 { 3745 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3746 uint32_t eicr = ixgbe->eicr; 3747 struct ixgbe_hw *hw = &ixgbe->hw; 3748 3749 mutex_enter(&ixgbe->gen_lock); 3750 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 3751 /* clear the interrupt */ 3752 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3753 3754 /* if link up, do multispeed fiber setup */ 3755 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3756 B_TRUE); 3757 ixgbe_driver_link_check(ixgbe); 3758 ixgbe_get_hw_state(ixgbe); 3759 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) { 3760 /* clear the interrupt */ 3761 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw)); 3762 3763 /* if link up, do sfp module setup */ 3764 (void) hw->mac.ops.setup_sfp(hw); 3765 3766 /* do multispeed fiber setup */ 3767 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3768 B_TRUE); 3769 ixgbe_driver_link_check(ixgbe); 3770 ixgbe_get_hw_state(ixgbe); 3771 } 3772 mutex_exit(&ixgbe->gen_lock); 3773 3774 /* 3775 * We need to fully re-check the link later. 3776 */ 3777 ixgbe->link_check_complete = B_FALSE; 3778 ixgbe->link_check_hrtime = gethrtime() + 3779 (IXGBE_LINK_UP_TIME * 100000000ULL); 3780 } 3781 3782 /* 3783 * ixgbe_overtemp_check - overtemp module processing done in taskq 3784 * 3785 * This routine will only be called on adapters with temperature sensor. 3786 * The indication of over-temperature can be either SDP0 interrupt or the link 3787 * status change interrupt. 3788 */ 3789 static void 3790 ixgbe_overtemp_check(void *arg) 3791 { 3792 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3793 struct ixgbe_hw *hw = &ixgbe->hw; 3794 uint32_t eicr = ixgbe->eicr; 3795 ixgbe_link_speed speed; 3796 boolean_t link_up; 3797 3798 mutex_enter(&ixgbe->gen_lock); 3799 3800 /* make sure we know current state of link */ 3801 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3802 3803 /* check over-temp condition */ 3804 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) || 3805 (eicr & IXGBE_EICR_LSC)) { 3806 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) { 3807 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3808 3809 /* 3810 * Disable the adapter interrupts 3811 */ 3812 ixgbe_disable_adapter_interrupts(ixgbe); 3813 3814 /* 3815 * Disable Rx/Tx units 3816 */ 3817 (void) ixgbe_stop_adapter(hw); 3818 3819 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3820 ixgbe_error(ixgbe, 3821 "Problem: Network adapter has been stopped " 3822 "because it has overheated"); 3823 ixgbe_error(ixgbe, 3824 "Action: Restart the computer. " 3825 "If the problem persists, power off the system " 3826 "and replace the adapter"); 3827 } 3828 } 3829 3830 /* write to clear the interrupt */ 3831 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 3832 3833 mutex_exit(&ixgbe->gen_lock); 3834 } 3835 3836 /* 3837 * ixgbe_phy_check - taskq to process interrupts from an external PHY 3838 * 3839 * This routine will only be called on adapters with external PHYs 3840 * (such as X550) that may be trying to raise our attention to some event. 3841 * Currently, this is limited to claiming PHY overtemperature and link status 3842 * change (LSC) events, however this may expand to include other things in 3843 * future adapters. 3844 */ 3845 static void 3846 ixgbe_phy_check(void *arg) 3847 { 3848 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3849 struct ixgbe_hw *hw = &ixgbe->hw; 3850 int rv; 3851 3852 mutex_enter(&ixgbe->gen_lock); 3853 3854 /* 3855 * X550 baseT PHY overtemp and LSC events are handled here. 
3856 * 3857 * If an overtemp event occurs, it will be reflected in the 3858 * return value of phy.ops.handle_lasi() and the common code will 3859 * automatically power off the baseT PHY. This is our cue to trigger 3860 * an FMA event. 3861 * 3862 * If a link status change event occurs, phy.ops.handle_lasi() will 3863 * automatically initiate a link setup between the integrated KR PHY 3864 * and the external X557 PHY to ensure that the link speed between 3865 * them matches the link speed of the baseT link. 3866 */ 3867 rv = ixgbe_handle_lasi(hw); 3868 3869 if (rv == IXGBE_ERR_OVERTEMP) { 3870 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3871 3872 /* 3873 * Disable the adapter interrupts 3874 */ 3875 ixgbe_disable_adapter_interrupts(ixgbe); 3876 3877 /* 3878 * Disable Rx/Tx units 3879 */ 3880 (void) ixgbe_stop_adapter(hw); 3881 3882 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3883 ixgbe_error(ixgbe, 3884 "Problem: Network adapter has been stopped because an " 3885 "overtemperature event was detected."); 3886 ixgbe_error(ixgbe, 3887 "Action: Shut down or restart the computer. If the issue " 3888 "persists, please take action in accordance with the " 3889 "recommendations from your system vendor."); 3890 } 3891 3892 mutex_exit(&ixgbe->gen_lock); 3893 } 3894 3895 /* 3896 * ixgbe_link_timer - timer for link status detection 3897 */ 3898 static void 3899 ixgbe_link_timer(void *arg) 3900 { 3901 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3902 3903 mutex_enter(&ixgbe->gen_lock); 3904 ixgbe_driver_link_check(ixgbe); 3905 mutex_exit(&ixgbe->gen_lock); 3906 } 3907 3908 /* 3909 * ixgbe_local_timer - Driver watchdog function. 3910 * 3911 * This function will handle the transmit stall check and other routines. 3912 */ 3913 static void 3914 ixgbe_local_timer(void *arg) 3915 { 3916 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3917 3918 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP) 3919 goto out; 3920 3921 if (ixgbe->ixgbe_state & IXGBE_ERROR) { 3922 ixgbe->reset_count++; 3923 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3924 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3925 goto out; 3926 } 3927 3928 if (ixgbe_stall_check(ixgbe)) { 3929 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL); 3930 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3931 3932 ixgbe->reset_count++; 3933 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3934 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3935 } 3936 3937 out: 3938 ixgbe_restart_watchdog_timer(ixgbe); 3939 } 3940 3941 /* 3942 * ixgbe_stall_check - Check for transmit stall. 3943 * 3944 * This function checks if the adapter is stalled (in transmit). 3945 * 3946 * It is called each time the watchdog timeout is invoked. 3947 * If the transmit descriptor reclaim continuously fails, 3948 * the watchdog value will increment by 1. If the watchdog 3949 * value exceeds the threshold, the ixgbe is assumed to 3950 * have stalled and needs to be reset.
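 *
 * With the one-second interval used by ixgbe_arm_watchdog_timer(),
 * this means a ring must show recycle failures for
 * STALL_WATCHDOG_TIMEOUT consecutive ticks (roughly that many
 * seconds) before a reset is attempted.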
3951 */ 3952 static boolean_t 3953 ixgbe_stall_check(ixgbe_t *ixgbe) 3954 { 3955 ixgbe_tx_ring_t *tx_ring; 3956 boolean_t result; 3957 int i; 3958 3959 if (ixgbe->link_state != LINK_STATE_UP) 3960 return (B_FALSE); 3961 3962 /* 3963 * If any tx ring is stalled, we'll reset the chipset 3964 */ 3965 result = B_FALSE; 3966 for (i = 0; i < ixgbe->num_tx_rings; i++) { 3967 tx_ring = &ixgbe->tx_rings[i]; 3968 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) { 3969 tx_ring->tx_recycle(tx_ring); 3970 } 3971 3972 if (tx_ring->recycle_fail > 0) 3973 tx_ring->stall_watchdog++; 3974 else 3975 tx_ring->stall_watchdog = 0; 3976 3977 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 3978 result = B_TRUE; 3979 break; 3980 } 3981 } 3982 3983 if (result) { 3984 tx_ring->stall_watchdog = 0; 3985 tx_ring->recycle_fail = 0; 3986 } 3987 3988 return (result); 3989 } 3990 3991 3992 /* 3993 * is_valid_mac_addr - Check if the mac address is valid. 3994 */ 3995 static boolean_t 3996 is_valid_mac_addr(uint8_t *mac_addr) 3997 { 3998 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 3999 const uint8_t addr_test2[6] = 4000 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4001 4002 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4003 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4004 return (B_FALSE); 4005 4006 return (B_TRUE); 4007 } 4008 4009 static boolean_t 4010 ixgbe_find_mac_address(ixgbe_t *ixgbe) 4011 { 4012 #ifdef __sparc 4013 struct ixgbe_hw *hw = &ixgbe->hw; 4014 uchar_t *bytes; 4015 struct ether_addr sysaddr; 4016 uint_t nelts; 4017 int err; 4018 boolean_t found = B_FALSE; 4019 4020 /* 4021 * The "vendor's factory-set address" may already have 4022 * been extracted from the chip, but if the property 4023 * "local-mac-address" is set we use that instead. 4024 * 4025 * We check whether it looks like an array of 6 4026 * bytes (which it should, if OBP set it). If we can't 4027 * make sense of it this way, we'll ignore it. 4028 */ 4029 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4030 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 4031 if (err == DDI_PROP_SUCCESS) { 4032 if (nelts == ETHERADDRL) { 4033 while (nelts--) 4034 hw->mac.addr[nelts] = bytes[nelts]; 4035 found = B_TRUE; 4036 } 4037 ddi_prop_free(bytes); 4038 } 4039 4040 /* 4041 * Look up the OBP property "local-mac-address?". If the user has set 4042 * 'local-mac-address? = false', use "the system address" instead. 4043 */ 4044 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 4045 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 4046 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 4047 if (localetheraddr(NULL, &sysaddr) != 0) { 4048 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 4049 found = B_TRUE; 4050 } 4051 } 4052 ddi_prop_free(bytes); 4053 } 4054 4055 /* 4056 * Finally(!), if there's a valid "mac-address" property (created 4057 * if we netbooted from this interface), we must use this instead 4058 * of any of the above to ensure that the NFS/install server doesn't 4059 * get confused by the address changing as illumos takes over! 
4060 */ 4061 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4062 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 4063 if (err == DDI_PROP_SUCCESS) { 4064 if (nelts == ETHERADDRL) { 4065 while (nelts--) 4066 hw->mac.addr[nelts] = bytes[nelts]; 4067 found = B_TRUE; 4068 } 4069 ddi_prop_free(bytes); 4070 } 4071 4072 if (found) { 4073 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 4074 return (B_TRUE); 4075 } 4076 #else 4077 _NOTE(ARGUNUSED(ixgbe)); 4078 #endif 4079 4080 return (B_TRUE); 4081 } 4082 4083 #pragma inline(ixgbe_arm_watchdog_timer) 4084 static void 4085 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 4086 { 4087 /* 4088 * Fire a watchdog timer 4089 */ 4090 ixgbe->watchdog_tid = 4091 timeout(ixgbe_local_timer, 4092 (void *)ixgbe, 1 * drv_usectohz(1000000)); 4093 4094 } 4095 4096 /* 4097 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 4098 */ 4099 void 4100 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 4101 { 4102 mutex_enter(&ixgbe->watchdog_lock); 4103 4104 if (!ixgbe->watchdog_enable) { 4105 ixgbe->watchdog_enable = B_TRUE; 4106 ixgbe->watchdog_start = B_TRUE; 4107 ixgbe_arm_watchdog_timer(ixgbe); 4108 } 4109 4110 mutex_exit(&ixgbe->watchdog_lock); 4111 } 4112 4113 /* 4114 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 4115 */ 4116 void 4117 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 4118 { 4119 timeout_id_t tid; 4120 4121 mutex_enter(&ixgbe->watchdog_lock); 4122 4123 ixgbe->watchdog_enable = B_FALSE; 4124 ixgbe->watchdog_start = B_FALSE; 4125 tid = ixgbe->watchdog_tid; 4126 ixgbe->watchdog_tid = 0; 4127 4128 mutex_exit(&ixgbe->watchdog_lock); 4129 4130 if (tid != 0) 4131 (void) untimeout(tid); 4132 } 4133 4134 /* 4135 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 4136 */ 4137 void 4138 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 4139 { 4140 mutex_enter(&ixgbe->watchdog_lock); 4141 4142 if (ixgbe->watchdog_enable) { 4143 if (!ixgbe->watchdog_start) { 4144 ixgbe->watchdog_start = B_TRUE; 4145 ixgbe_arm_watchdog_timer(ixgbe); 4146 } 4147 } 4148 4149 mutex_exit(&ixgbe->watchdog_lock); 4150 } 4151 4152 /* 4153 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 4154 */ 4155 static void 4156 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 4157 { 4158 mutex_enter(&ixgbe->watchdog_lock); 4159 4160 if (ixgbe->watchdog_start) 4161 ixgbe_arm_watchdog_timer(ixgbe); 4162 4163 mutex_exit(&ixgbe->watchdog_lock); 4164 } 4165 4166 /* 4167 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 4168 */ 4169 void 4170 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 4171 { 4172 timeout_id_t tid; 4173 4174 mutex_enter(&ixgbe->watchdog_lock); 4175 4176 ixgbe->watchdog_start = B_FALSE; 4177 tid = ixgbe->watchdog_tid; 4178 ixgbe->watchdog_tid = 0; 4179 4180 mutex_exit(&ixgbe->watchdog_lock); 4181 4182 if (tid != 0) 4183 (void) untimeout(tid); 4184 } 4185 4186 /* 4187 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 4188 */ 4189 static void 4190 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 4191 { 4192 struct ixgbe_hw *hw = &ixgbe->hw; 4193 4194 /* 4195 * mask all interrupts off 4196 */ 4197 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 4198 4199 /* 4200 * for MSI-X, also disable autoclear 4201 */ 4202 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4203 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 4204 } 4205 4206 IXGBE_WRITE_FLUSH(hw); 4207 } 4208 4209 /* 4210 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 
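 *
 * This builds the EIMS mask from the shared-code default plus the
 * adapter's "other" causes, then programs EIMS, EIAC (autoclear),
 * EIAM (automask) and GPIE according to the interrupt type in use.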
4211 */ 4212 static void 4213 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 4214 { 4215 struct ixgbe_hw *hw = &ixgbe->hw; 4216 uint32_t eiac, eiam; 4217 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4218 4219 /* interrupt types to enable */ 4220 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 4221 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 4222 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */ 4223 4224 /* enable automask on "other" causes that this adapter can generate */ 4225 eiam = ixgbe->capab->other_intr; 4226 4227 /* 4228 * msi-x mode 4229 */ 4230 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4231 /* enable autoclear but not on bits 29:20 */ 4232 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR); 4233 4234 /* general purpose interrupt enable */ 4235 gpie |= (IXGBE_GPIE_MSIX_MODE 4236 | IXGBE_GPIE_PBA_SUPPORT 4237 | IXGBE_GPIE_OCD 4238 | IXGBE_GPIE_EIAME); 4239 /* 4240 * non-msi-x mode 4241 */ 4242 } else { 4243 4244 /* disable autoclear, leave gpie at default */ 4245 eiac = 0; 4246 4247 /* 4248 * General purpose interrupt enable. 4249 * For 82599, X540 and X550, extended interrupt 4250 * automask enable only in MSI or MSI-X mode 4251 */ 4252 if ((hw->mac.type == ixgbe_mac_82598EB) || 4253 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) { 4254 gpie |= IXGBE_GPIE_EIAME; 4255 } 4256 } 4257 4258 /* Enable specific "other" interrupt types */ 4259 switch (hw->mac.type) { 4260 case ixgbe_mac_82598EB: 4261 gpie |= ixgbe->capab->other_gpie; 4262 break; 4263 4264 case ixgbe_mac_82599EB: 4265 case ixgbe_mac_X540: 4266 case ixgbe_mac_X550: 4267 case ixgbe_mac_X550EM_x: 4268 gpie |= ixgbe->capab->other_gpie; 4269 4270 /* Enable RSC Delay 8us when LRO enabled */ 4271 if (ixgbe->lro_enable) { 4272 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT); 4273 } 4274 break; 4275 4276 default: 4277 break; 4278 } 4279 4280 /* write to interrupt control registers */ 4281 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4282 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 4283 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam); 4284 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 4285 IXGBE_WRITE_FLUSH(hw); 4286 } 4287 4288 /* 4289 * ixgbe_loopback_ioctl - Loopback support. 
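 *
 * Handles the LB_GET_INFO_SIZE, LB_GET_INFO, LB_GET_MODE and
 * LB_SET_MODE requests; the modes offered are lb_normal, lb_mac
 * (internal MAC loopback) and lb_external.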
4290 */ 4291 enum ioc_reply 4292 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 4293 { 4294 lb_info_sz_t *lbsp; 4295 lb_property_t *lbpp; 4296 uint32_t *lbmp; 4297 uint32_t size; 4298 uint32_t value; 4299 4300 if (mp->b_cont == NULL) 4301 return (IOC_INVAL); 4302 4303 switch (iocp->ioc_cmd) { 4304 default: 4305 return (IOC_INVAL); 4306 4307 case LB_GET_INFO_SIZE: 4308 size = sizeof (lb_info_sz_t); 4309 if (iocp->ioc_count != size) 4310 return (IOC_INVAL); 4311 4312 value = sizeof (lb_normal); 4313 value += sizeof (lb_mac); 4314 value += sizeof (lb_external); 4315 4316 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 4317 *lbsp = value; 4318 break; 4319 4320 case LB_GET_INFO: 4321 value = sizeof (lb_normal); 4322 value += sizeof (lb_mac); 4323 value += sizeof (lb_external); 4324 4325 size = value; 4326 if (iocp->ioc_count != size) 4327 return (IOC_INVAL); 4328 4329 value = 0; 4330 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 4331 4332 lbpp[value++] = lb_normal; 4333 lbpp[value++] = lb_mac; 4334 lbpp[value++] = lb_external; 4335 break; 4336 4337 case LB_GET_MODE: 4338 size = sizeof (uint32_t); 4339 if (iocp->ioc_count != size) 4340 return (IOC_INVAL); 4341 4342 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4343 *lbmp = ixgbe->loopback_mode; 4344 break; 4345 4346 case LB_SET_MODE: 4347 size = 0; 4348 if (iocp->ioc_count != sizeof (uint32_t)) 4349 return (IOC_INVAL); 4350 4351 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4352 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 4353 return (IOC_INVAL); 4354 break; 4355 } 4356 4357 iocp->ioc_count = size; 4358 iocp->ioc_error = 0; 4359 4360 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4361 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4362 return (IOC_INVAL); 4363 } 4364 4365 return (IOC_REPLY); 4366 } 4367 4368 /* 4369 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 4370 */ 4371 static boolean_t 4372 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 4373 { 4374 if (mode == ixgbe->loopback_mode) 4375 return (B_TRUE); 4376 4377 ixgbe->loopback_mode = mode; 4378 4379 if (mode == IXGBE_LB_NONE) { 4380 /* 4381 * Reset the chip 4382 */ 4383 (void) ixgbe_reset(ixgbe); 4384 return (B_TRUE); 4385 } 4386 4387 mutex_enter(&ixgbe->gen_lock); 4388 4389 switch (mode) { 4390 default: 4391 mutex_exit(&ixgbe->gen_lock); 4392 return (B_FALSE); 4393 4394 case IXGBE_LB_EXTERNAL: 4395 break; 4396 4397 case IXGBE_LB_INTERNAL_MAC: 4398 ixgbe_set_internal_mac_loopback(ixgbe); 4399 break; 4400 } 4401 4402 mutex_exit(&ixgbe->gen_lock); 4403 4404 return (B_TRUE); 4405 } 4406 4407 /* 4408 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 
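 *
 * Loopback is enabled by setting the LPBK bit in HLREG0 and forcing
 * the link up; on 82598 the Atlas tx lanes are additionally powered
 * down so that looped packets never reach the wire.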
4409 */ 4410 static void 4411 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 4412 { 4413 struct ixgbe_hw *hw; 4414 uint32_t reg; 4415 uint8_t atlas; 4416 4417 hw = &ixgbe->hw; 4418 4419 /* 4420 * Setup MAC loopback 4421 */ 4422 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 4423 reg |= IXGBE_HLREG0_LPBK; 4424 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 4425 4426 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4427 reg &= ~IXGBE_AUTOC_LMS_MASK; 4428 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4429 4430 /* 4431 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 4432 */ 4433 switch (hw->mac.type) { 4434 case ixgbe_mac_82598EB: 4435 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4436 &atlas); 4437 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 4438 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4439 atlas); 4440 4441 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4442 &atlas); 4443 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 4444 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4445 atlas); 4446 4447 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4448 &atlas); 4449 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 4450 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4451 atlas); 4452 4453 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4454 &atlas); 4455 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 4456 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4457 atlas); 4458 break; 4459 4460 case ixgbe_mac_82599EB: 4461 case ixgbe_mac_X540: 4462 case ixgbe_mac_X550: 4463 case ixgbe_mac_X550EM_x: 4464 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4465 reg |= (IXGBE_AUTOC_FLU | 4466 IXGBE_AUTOC_10G_KX4); 4467 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4468 4469 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL, 4470 B_FALSE); 4471 break; 4472 4473 default: 4474 break; 4475 } 4476 } 4477 4478 #pragma inline(ixgbe_intr_rx_work) 4479 /* 4480 * ixgbe_intr_rx_work - RX processing of ISR. 4481 */ 4482 static void 4483 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 4484 { 4485 mblk_t *mp; 4486 4487 mutex_enter(&rx_ring->rx_lock); 4488 4489 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4490 mutex_exit(&rx_ring->rx_lock); 4491 4492 if (mp != NULL) 4493 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4494 rx_ring->ring_gen_num); 4495 } 4496 4497 #pragma inline(ixgbe_intr_tx_work) 4498 /* 4499 * ixgbe_intr_tx_work - TX processing of ISR. 
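 *
 * Recycles completed tx descriptors and, when a blocked ring again
 * has at least tx_resched_thresh free descriptors, asks the MAC
 * layer to resume transmission on it.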
4500 */ 4501 static void 4502 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 4503 { 4504 ixgbe_t *ixgbe = tx_ring->ixgbe; 4505 4506 /* 4507 * Recycle the tx descriptors 4508 */ 4509 tx_ring->tx_recycle(tx_ring); 4510 4511 /* 4512 * Schedule the re-transmit 4513 */ 4514 if (tx_ring->reschedule && 4515 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) { 4516 tx_ring->reschedule = B_FALSE; 4517 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl, 4518 tx_ring->ring_handle); 4519 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 4520 } 4521 } 4522 4523 #pragma inline(ixgbe_intr_other_work) 4524 /* 4525 * ixgbe_intr_other_work - Process interrupt types other than tx/rx 4526 */ 4527 static void 4528 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr) 4529 { 4530 struct ixgbe_hw *hw = &ixgbe->hw; 4531 4532 ASSERT(mutex_owned(&ixgbe->gen_lock)); 4533 4534 /* 4535 * handle link status change 4536 */ 4537 if (eicr & IXGBE_EICR_LSC) { 4538 ixgbe_driver_link_check(ixgbe); 4539 ixgbe_get_hw_state(ixgbe); 4540 } 4541 4542 /* 4543 * check for fan failure on adapters with fans 4544 */ 4545 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 4546 (eicr & IXGBE_EICR_GPI_SDP1)) { 4547 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 4548 4549 /* 4550 * Disable the adapter interrupts 4551 */ 4552 ixgbe_disable_adapter_interrupts(ixgbe); 4553 4554 /* 4555 * Disable Rx/Tx units 4556 */ 4557 (void) ixgbe_stop_adapter(&ixgbe->hw); 4558 4559 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 4560 ixgbe_error(ixgbe, 4561 "Problem: Network adapter has been stopped " 4562 "because the fan has stopped.\n"); 4563 ixgbe_error(ixgbe, 4564 "Action: Replace the adapter.\n"); 4565 4566 /* re-enable the interrupt, which was automasked */ 4567 ixgbe->eims |= IXGBE_EICR_GPI_SDP1; 4568 } 4569 4570 /* 4571 * Do SFP check for adapters with hot-plug capability 4572 */ 4573 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) && 4574 ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) || 4575 (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) { 4576 ixgbe->eicr = eicr; 4577 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq, 4578 ixgbe_sfp_check, (void *)ixgbe, 4579 DDI_NOSLEEP)) != DDI_SUCCESS) { 4580 ixgbe_log(ixgbe, "No memory available to dispatch " 4581 "taskq for SFP check"); 4582 } 4583 } 4584 4585 /* 4586 * Do over-temperature check for adapters with temp sensor 4587 */ 4588 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) && 4589 ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) || 4590 (eicr & IXGBE_EICR_LSC))) { 4591 ixgbe->eicr = eicr; 4592 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq, 4593 ixgbe_overtemp_check, (void *)ixgbe, 4594 DDI_NOSLEEP)) != DDI_SUCCESS) { 4595 ixgbe_log(ixgbe, "No memory available to dispatch " 4596 "taskq for overtemp check"); 4597 } 4598 } 4599 4600 /* 4601 * Process an external PHY interrupt 4602 */ 4603 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 4604 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 4605 ixgbe->eicr = eicr; 4606 if ((ddi_taskq_dispatch(ixgbe->phy_taskq, 4607 ixgbe_phy_check, (void *)ixgbe, 4608 DDI_NOSLEEP)) != DDI_SUCCESS) { 4609 ixgbe_log(ixgbe, "No memory available to dispatch " 4610 "taskq for PHY check"); 4611 } 4612 } 4613 } 4614 4615 /* 4616 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 
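 *
 * EICR is read once under gen_lock: bit 0 indicates rx work on ring 0,
 * bit 1 indicates tx work on ring 0, and any remaining cause bits are
 * handled by ixgbe_intr_other_work(). Rx delivery and the tx
 * reschedule are deferred until the lock has been dropped.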
4617 */ 4618 static uint_t 4619 ixgbe_intr_legacy(void *arg1, void *arg2) 4620 { 4621 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4622 struct ixgbe_hw *hw = &ixgbe->hw; 4623 ixgbe_tx_ring_t *tx_ring; 4624 ixgbe_rx_ring_t *rx_ring; 4625 uint32_t eicr; 4626 mblk_t *mp; 4627 boolean_t tx_reschedule; 4628 uint_t result; 4629 4630 _NOTE(ARGUNUSED(arg2)); 4631 4632 mutex_enter(&ixgbe->gen_lock); 4633 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 4634 mutex_exit(&ixgbe->gen_lock); 4635 return (DDI_INTR_UNCLAIMED); 4636 } 4637 4638 mp = NULL; 4639 tx_reschedule = B_FALSE; 4640 4641 /* 4642 * Any bit set in eicr: claim this interrupt 4643 */ 4644 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4645 4646 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4647 mutex_exit(&ixgbe->gen_lock); 4648 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4649 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4650 return (DDI_INTR_CLAIMED); 4651 } 4652 4653 if (eicr) { 4654 /* 4655 * For legacy interrupt, we have only one interrupt, 4656 * so we have only one rx ring and one tx ring enabled. 4657 */ 4658 ASSERT(ixgbe->num_rx_rings == 1); 4659 ASSERT(ixgbe->num_tx_rings == 1); 4660 4661 /* 4662 * For legacy interrupt, rx rings[0] will use RTxQ[0]. 4663 */ 4664 if (eicr & 0x1) { 4665 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE; 4666 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4667 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4668 /* 4669 * Clean the rx descriptors 4670 */ 4671 rx_ring = &ixgbe->rx_rings[0]; 4672 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4673 } 4674 4675 /* 4676 * For legacy interrupt, tx rings[0] will use RTxQ[1]. 4677 */ 4678 if (eicr & 0x2) { 4679 /* 4680 * Recycle the tx descriptors 4681 */ 4682 tx_ring = &ixgbe->tx_rings[0]; 4683 tx_ring->tx_recycle(tx_ring); 4684 4685 /* 4686 * Schedule the re-transmit 4687 */ 4688 tx_reschedule = (tx_ring->reschedule && 4689 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)); 4690 } 4691 4692 /* any interrupt type other than tx/rx */ 4693 if (eicr & ixgbe->capab->other_intr) { 4694 switch (hw->mac.type) { 4695 case ixgbe_mac_82598EB: 4696 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4697 break; 4698 4699 case ixgbe_mac_82599EB: 4700 case ixgbe_mac_X540: 4701 case ixgbe_mac_X550: 4702 case ixgbe_mac_X550EM_x: 4703 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4704 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4705 break; 4706 4707 default: 4708 break; 4709 } 4710 ixgbe_intr_other_work(ixgbe, eicr); 4711 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4712 } 4713 4714 mutex_exit(&ixgbe->gen_lock); 4715 4716 result = DDI_INTR_CLAIMED; 4717 } else { 4718 mutex_exit(&ixgbe->gen_lock); 4719 4720 /* 4721 * No interrupt cause bits set: don't claim this interrupt. 4722 */ 4723 result = DDI_INTR_UNCLAIMED; 4724 } 4725 4726 /* re-enable the interrupts which were automasked */ 4727 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4728 4729 /* 4730 * Do the following work outside of the gen_lock 4731 */ 4732 if (mp != NULL) { 4733 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4734 rx_ring->ring_gen_num); 4735 } 4736 4737 if (tx_reschedule) { 4738 tx_ring->reschedule = B_FALSE; 4739 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle); 4740 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 4741 } 4742 4743 return (result); 4744 } 4745 4746 /* 4747 * ixgbe_intr_msi - Interrupt handler for MSI. 
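 *
 * MSI uses a single vector, so the handling mirrors the legacy case:
 * EICR bit 0 maps to rx ring 0, bit 1 to tx ring 0, and the remaining
 * cause bits are treated as "other" interrupts.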
4748 */ 4749 static uint_t 4750 ixgbe_intr_msi(void *arg1, void *arg2) 4751 { 4752 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4753 struct ixgbe_hw *hw = &ixgbe->hw; 4754 uint32_t eicr; 4755 4756 _NOTE(ARGUNUSED(arg2)); 4757 4758 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4759 4760 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4761 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4762 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4763 return (DDI_INTR_CLAIMED); 4764 } 4765 4766 /* 4767 * For MSI interrupt, we have only one vector, 4768 * so we have only one rx ring and one tx ring enabled. 4769 */ 4770 ASSERT(ixgbe->num_rx_rings == 1); 4771 ASSERT(ixgbe->num_tx_rings == 1); 4772 4773 /* 4774 * For MSI interrupt, rx rings[0] will use RTxQ[0]. 4775 */ 4776 if (eicr & 0x1) { 4777 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 4778 } 4779 4780 /* 4781 * For MSI interrupt, tx rings[0] will use RTxQ[1]. 4782 */ 4783 if (eicr & 0x2) { 4784 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 4785 } 4786 4787 /* any interrupt type other than tx/rx */ 4788 if (eicr & ixgbe->capab->other_intr) { 4789 mutex_enter(&ixgbe->gen_lock); 4790 switch (hw->mac.type) { 4791 case ixgbe_mac_82598EB: 4792 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4793 break; 4794 4795 case ixgbe_mac_82599EB: 4796 case ixgbe_mac_X540: 4797 case ixgbe_mac_X550: 4798 case ixgbe_mac_X550EM_x: 4799 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4800 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4801 break; 4802 4803 default: 4804 break; 4805 } 4806 ixgbe_intr_other_work(ixgbe, eicr); 4807 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4808 mutex_exit(&ixgbe->gen_lock); 4809 } 4810 4811 /* re-enable the interrupts which were automasked */ 4812 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4813 4814 return (DDI_INTR_CLAIMED); 4815 } 4816 4817 /* 4818 * ixgbe_intr_msix - Interrupt handler for MSI-X. 
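 *
 * Each vector carries bitmaps (rx_map, tx_map, other_map) of the rings
 * and causes it services; the handler walks the set bits with
 * bt_getlowbit() and processes each ring in turn.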
4819 */ 4820 static uint_t 4821 ixgbe_intr_msix(void *arg1, void *arg2) 4822 { 4823 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1; 4824 ixgbe_t *ixgbe = vect->ixgbe; 4825 struct ixgbe_hw *hw = &ixgbe->hw; 4826 uint32_t eicr; 4827 int r_idx = 0; 4828 4829 _NOTE(ARGUNUSED(arg2)); 4830 4831 /* 4832 * Clean each rx ring that has its bit set in the map 4833 */ 4834 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1)); 4835 while (r_idx >= 0) { 4836 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]); 4837 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 4838 (ixgbe->num_rx_rings - 1)); 4839 } 4840 4841 /* 4842 * Clean each tx ring that has its bit set in the map 4843 */ 4844 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1)); 4845 while (r_idx >= 0) { 4846 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]); 4847 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 4848 (ixgbe->num_tx_rings - 1)); 4849 } 4850 4851 4852 /* 4853 * Clean other interrupt (link change) that has its bit set in the map 4854 */ 4855 if (BT_TEST(vect->other_map, 0) == 1) { 4856 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4857 4858 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != 4859 DDI_FM_OK) { 4860 ddi_fm_service_impact(ixgbe->dip, 4861 DDI_SERVICE_DEGRADED); 4862 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4863 return (DDI_INTR_CLAIMED); 4864 } 4865 4866 /* 4867 * Check "other" cause bits: any interrupt type other than tx/rx 4868 */ 4869 if (eicr & ixgbe->capab->other_intr) { 4870 mutex_enter(&ixgbe->gen_lock); 4871 switch (hw->mac.type) { 4872 case ixgbe_mac_82598EB: 4873 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4874 ixgbe_intr_other_work(ixgbe, eicr); 4875 break; 4876 4877 case ixgbe_mac_82599EB: 4878 case ixgbe_mac_X540: 4879 case ixgbe_mac_X550: 4880 case ixgbe_mac_X550EM_x: 4881 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4882 ixgbe_intr_other_work(ixgbe, eicr); 4883 break; 4884 4885 default: 4886 break; 4887 } 4888 mutex_exit(&ixgbe->gen_lock); 4889 } 4890 4891 /* re-enable the interrupts which were automasked */ 4892 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4893 } 4894 4895 return (DDI_INTR_CLAIMED); 4896 } 4897 4898 /* 4899 * ixgbe_alloc_intrs - Allocate interrupts for the driver. 4900 * 4901 * Normal sequence is to try MSI-X; if not successful, try MSI; 4902 * if not successful, try Legacy. 4903 * ixgbe->intr_force can be used to force sequence to start with 4904 * any of the 3 types. 4905 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4906 */ 4907 static int 4908 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 4909 { 4910 dev_info_t *devinfo; 4911 int intr_types; 4912 int rc; 4913 4914 devinfo = ixgbe->dip; 4915 4916 /* 4917 * Get supported interrupt types 4918 */ 4919 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 4920 4921 if (rc != DDI_SUCCESS) { 4922 ixgbe_log(ixgbe, 4923 "Get supported interrupt types failed: %d", rc); 4924 return (IXGBE_FAILURE); 4925 } 4926 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 4927 4928 ixgbe->intr_type = 0; 4929 4930 /* 4931 * Install MSI-X interrupts 4932 */ 4933 if ((intr_types & DDI_INTR_TYPE_MSIX) && 4934 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 4935 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 4936 if (rc == IXGBE_SUCCESS) 4937 return (IXGBE_SUCCESS); 4938 4939 ixgbe_log(ixgbe, 4940 "Allocate MSI-X failed, trying MSI interrupts..."); 4941 } 4942 4943 /* 4944 * MSI-X not used, force rings and groups to 1 4945 */ 4946 ixgbe->num_rx_rings = 1; 4947 ixgbe->num_rx_groups = 1; 4948 ixgbe->num_tx_rings = 1; 4949 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 4950 ixgbe_log(ixgbe, 4951 "MSI-X not used, force rings and groups number to 1"); 4952 4953 /* 4954 * Install MSI interrupts 4955 */ 4956 if ((intr_types & DDI_INTR_TYPE_MSI) && 4957 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 4958 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 4959 if (rc == IXGBE_SUCCESS) 4960 return (IXGBE_SUCCESS); 4961 4962 ixgbe_log(ixgbe, 4963 "Allocate MSI failed, trying Legacy interrupts..."); 4964 } 4965 4966 /* 4967 * Install legacy interrupts 4968 */ 4969 if (intr_types & DDI_INTR_TYPE_FIXED) { 4970 /* 4971 * Disallow legacy interrupts for X550. X550 has a silicon 4972 * bug which prevents Shared Legacy interrupts from working. 4973 * For details, please reference: 4974 * 4975 * Intel Ethernet Controller X550 Specification Update rev. 2.1 4976 * May 2016, erratum 22: PCIe Interrupt Status Bit 4977 */ 4978 if (ixgbe->hw.mac.type == ixgbe_mac_X550 || 4979 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x || 4980 ixgbe->hw.mac.type == ixgbe_mac_X550_vf || 4981 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) { 4982 ixgbe_log(ixgbe, 4983 "Legacy interrupts are not supported on this " 4984 "adapter. Please use MSI or MSI-X instead."); 4985 return (IXGBE_FAILURE); 4986 } 4987 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 4988 if (rc == IXGBE_SUCCESS) 4989 return (IXGBE_SUCCESS); 4990 4991 ixgbe_log(ixgbe, 4992 "Allocate Legacy interrupts failed"); 4993 } 4994 4995 /* 4996 * If none of the 3 types succeeded, return failure 4997 */ 4998 return (IXGBE_FAILURE); 4999 } 5000 5001 /* 5002 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 5003 * 5004 * For legacy and MSI, only 1 handle is needed. For MSI-X, 5005 * if fewer than 2 handles are available, return failure. 5006 * Upon success, this maps the vectors to rx and tx rings for 5007 * interrupts. 
5008 */ 5009 static int 5010 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 5011 { 5012 dev_info_t *devinfo; 5013 int request, count, actual; 5014 int minimum; 5015 int rc; 5016 uint32_t ring_per_group; 5017 5018 devinfo = ixgbe->dip; 5019 5020 switch (intr_type) { 5021 case DDI_INTR_TYPE_FIXED: 5022 request = 1; /* Request 1 legacy interrupt handle */ 5023 minimum = 1; 5024 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 5025 break; 5026 5027 case DDI_INTR_TYPE_MSI: 5028 request = 1; /* Request 1 MSI interrupt handle */ 5029 minimum = 1; 5030 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 5031 break; 5032 5033 case DDI_INTR_TYPE_MSIX: 5034 /* 5035 * The best number of vectors for the adapter is 5036 * (# rx rings + # tx rings); however, we will 5037 * limit the number requested. 5038 */ 5039 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings); 5040 if (request > ixgbe->capab->max_ring_vect) 5041 request = ixgbe->capab->max_ring_vect; 5042 minimum = 1; 5043 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 5044 break; 5045 5046 default: 5047 ixgbe_log(ixgbe, 5048 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 5049 intr_type); 5050 return (IXGBE_FAILURE); 5051 } 5052 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 5053 request, minimum); 5054 5055 /* 5056 * Get number of supported interrupts 5057 */ 5058 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 5059 if ((rc != DDI_SUCCESS) || (count < minimum)) { 5060 ixgbe_log(ixgbe, 5061 "Get interrupt number failed. Return: %d, count: %d", 5062 rc, count); 5063 return (IXGBE_FAILURE); 5064 } 5065 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 5066 5067 actual = 0; 5068 ixgbe->intr_cnt = 0; 5069 ixgbe->intr_cnt_max = 0; 5070 ixgbe->intr_cnt_min = 0; 5071 5072 /* 5073 * Allocate an array of interrupt handles 5074 */ 5075 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 5076 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 5077 5078 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 5079 request, &actual, DDI_INTR_ALLOC_NORMAL); 5080 if (rc != DDI_SUCCESS) { 5081 ixgbe_log(ixgbe, "Allocate interrupts failed. " 5082 "return: %d, request: %d, actual: %d", 5083 rc, request, actual); 5084 goto alloc_handle_fail; 5085 } 5086 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 5087 5088 /* 5089 * upper/lower limit of interrupts 5090 */ 5091 ixgbe->intr_cnt = actual; 5092 ixgbe->intr_cnt_max = request; 5093 ixgbe->intr_cnt_min = minimum; 5094 5095 /* 5096 * The number of RSS rings per group should not exceed the number of 5097 * rx interrupt vectors; otherwise the rx ring count must be adjusted. 5098 */ 5099 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5100 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0); 5101 if (actual < ring_per_group) { 5102 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual; 5103 ixgbe_setup_vmdq_rss_conf(ixgbe); 5104 } 5105 5106 /* 5107 * Now we know the actual number of vectors. Below, the vectors are 5108 * mapped to the "other" cause and to the rx and tx rings.
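 *
 * As a hypothetical example, with 3 vectors, 4 rx rings and 2 tx
 * rings, ixgbe_map_intrs_to_vectors() puts the "other" cause on
 * vector 0 and distributes the rings round-robin: vector 0 also
 * services rx rings 0 and 3, vector 1 services rx ring 1 and tx
 * ring 0, and vector 2 services rx ring 2 and tx ring 1.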
5109 */ 5110 if (actual < minimum) { 5111 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 5112 actual); 5113 goto alloc_handle_fail; 5114 } 5115 5116 /* 5117 * Get priority for first vector, assume remaining are all the same 5118 */ 5119 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 5120 if (rc != DDI_SUCCESS) { 5121 ixgbe_log(ixgbe, 5122 "Get interrupt priority failed: %d", rc); 5123 goto alloc_handle_fail; 5124 } 5125 5126 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 5127 if (rc != DDI_SUCCESS) { 5128 ixgbe_log(ixgbe, 5129 "Get interrupt cap failed: %d", rc); 5130 goto alloc_handle_fail; 5131 } 5132 5133 ixgbe->intr_type = intr_type; 5134 5135 return (IXGBE_SUCCESS); 5136 5137 alloc_handle_fail: 5138 ixgbe_rem_intrs(ixgbe); 5139 5140 return (IXGBE_FAILURE); 5141 } 5142 5143 /* 5144 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 5145 * 5146 * Before adding the interrupt handlers, the interrupt vectors have 5147 * been allocated, and the rx/tx rings have also been allocated. 5148 */ 5149 static int 5150 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 5151 { 5152 int vector = 0; 5153 int rc; 5154 5155 switch (ixgbe->intr_type) { 5156 case DDI_INTR_TYPE_MSIX: 5157 /* 5158 * Add interrupt handler for all vectors 5159 */ 5160 for (vector = 0; vector < ixgbe->intr_cnt; vector++) { 5161 /* 5162 * install pointer to vect_map[vector] 5163 */ 5164 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5165 (ddi_intr_handler_t *)ixgbe_intr_msix, 5166 (void *)&ixgbe->vect_map[vector], NULL); 5167 5168 if (rc != DDI_SUCCESS) { 5169 ixgbe_log(ixgbe, 5170 "Add interrupt handler failed. " 5171 "return: %d, vector: %d", rc, vector); 5172 for (vector--; vector >= 0; vector--) { 5173 (void) ddi_intr_remove_handler( 5174 ixgbe->htable[vector]); 5175 } 5176 return (IXGBE_FAILURE); 5177 } 5178 } 5179 5180 break; 5181 5182 case DDI_INTR_TYPE_MSI: 5183 /* 5184 * Add interrupt handlers for the only vector 5185 */ 5186 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5187 (ddi_intr_handler_t *)ixgbe_intr_msi, 5188 (void *)ixgbe, NULL); 5189 5190 if (rc != DDI_SUCCESS) { 5191 ixgbe_log(ixgbe, 5192 "Add MSI interrupt handler failed: %d", rc); 5193 return (IXGBE_FAILURE); 5194 } 5195 5196 break; 5197 5198 case DDI_INTR_TYPE_FIXED: 5199 /* 5200 * Add interrupt handlers for the only vector 5201 */ 5202 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5203 (ddi_intr_handler_t *)ixgbe_intr_legacy, 5204 (void *)ixgbe, NULL); 5205 5206 if (rc != DDI_SUCCESS) { 5207 ixgbe_log(ixgbe, 5208 "Add legacy interrupt handler failed: %d", rc); 5209 return (IXGBE_FAILURE); 5210 } 5211 5212 break; 5213 5214 default: 5215 return (IXGBE_FAILURE); 5216 } 5217 5218 return (IXGBE_SUCCESS); 5219 } 5220 5221 #pragma inline(ixgbe_map_rxring_to_vector) 5222 /* 5223 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 5224 */ 5225 static void 5226 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 5227 { 5228 /* 5229 * Set bit in map 5230 */ 5231 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 5232 5233 /* 5234 * Count bits set 5235 */ 5236 ixgbe->vect_map[v_idx].rxr_cnt++; 5237 5238 /* 5239 * Remember bit position 5240 */ 5241 ixgbe->rx_rings[r_idx].intr_vector = v_idx; 5242 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 5243 } 5244 5245 #pragma inline(ixgbe_map_txring_to_vector) 5246 /* 5247 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 
5248 */ 5249 static void 5250 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 5251 { 5252 /* 5253 * Set bit in map 5254 */ 5255 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 5256 5257 /* 5258 * Count bits set 5259 */ 5260 ixgbe->vect_map[v_idx].txr_cnt++; 5261 5262 /* 5263 * Remember bit position 5264 */ 5265 ixgbe->tx_rings[t_idx].intr_vector = v_idx; 5266 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 5267 } 5268 5269 /* 5270 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector 5271 * allocation register (IVAR). 5272 * cause: 5273 * -1 : other cause 5274 * 0 : rx 5275 * 1 : tx 5276 */ 5277 static void 5278 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector, 5279 int8_t cause) 5280 { 5281 struct ixgbe_hw *hw = &ixgbe->hw; 5282 u32 ivar, index; 5283 5284 switch (hw->mac.type) { 5285 case ixgbe_mac_82598EB: 5286 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5287 if (cause == -1) { 5288 cause = 0; 5289 } 5290 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5291 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5292 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3))); 5293 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3))); 5294 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5295 break; 5296 5297 case ixgbe_mac_82599EB: 5298 case ixgbe_mac_X540: 5299 case ixgbe_mac_X550: 5300 case ixgbe_mac_X550EM_x: 5301 if (cause == -1) { 5302 /* other causes */ 5303 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5304 index = (intr_alloc_entry & 1) * 8; 5305 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5306 ivar &= ~(0xFF << index); 5307 ivar |= (msix_vector << index); 5308 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5309 } else { 5310 /* tx or rx causes */ 5311 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5312 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5313 ivar = IXGBE_READ_REG(hw, 5314 IXGBE_IVAR(intr_alloc_entry >> 1)); 5315 ivar &= ~(0xFF << index); 5316 ivar |= (msix_vector << index); 5317 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5318 ivar); 5319 } 5320 break; 5321 5322 default: 5323 break; 5324 } 5325 } 5326 5327 /* 5328 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of 5329 * given interrupt vector allocation register (IVAR). 
5330 * cause: 5331 * -1 : other cause 5332 * 0 : rx 5333 * 1 : tx 5334 */ 5335 static void 5336 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5337 { 5338 struct ixgbe_hw *hw = &ixgbe->hw; 5339 u32 ivar, index; 5340 5341 switch (hw->mac.type) { 5342 case ixgbe_mac_82598EB: 5343 if (cause == -1) { 5344 cause = 0; 5345 } 5346 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5347 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5348 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 * 5349 (intr_alloc_entry & 0x3))); 5350 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5351 break; 5352 5353 case ixgbe_mac_82599EB: 5354 case ixgbe_mac_X540: 5355 case ixgbe_mac_X550: 5356 case ixgbe_mac_X550EM_x: 5357 if (cause == -1) { 5358 /* other causes */ 5359 index = (intr_alloc_entry & 1) * 8; 5360 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5361 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5362 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5363 } else { 5364 /* tx or rx causes */ 5365 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5366 ivar = IXGBE_READ_REG(hw, 5367 IXGBE_IVAR(intr_alloc_entry >> 1)); 5368 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5369 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5370 ivar); 5371 } 5372 break; 5373 5374 default: 5375 break; 5376 } 5377 } 5378 5379 /* 5380 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of 5381 * given interrupt vector allocation register (IVAR). 5382 * cause: 5383 * -1 : other cause 5384 * 0 : rx 5385 * 1 : tx 5386 */ 5387 static void 5388 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5389 { 5390 struct ixgbe_hw *hw = &ixgbe->hw; 5391 u32 ivar, index; 5392 5393 switch (hw->mac.type) { 5394 case ixgbe_mac_82598EB: 5395 if (cause == -1) { 5396 cause = 0; 5397 } 5398 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5399 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5400 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 * 5401 (intr_alloc_entry & 0x3))); 5402 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5403 break; 5404 5405 case ixgbe_mac_82599EB: 5406 case ixgbe_mac_X540: 5407 case ixgbe_mac_X550: 5408 case ixgbe_mac_X550EM_x: 5409 if (cause == -1) { 5410 /* other causes */ 5411 index = (intr_alloc_entry & 1) * 8; 5412 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5413 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5414 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5415 } else { 5416 /* tx or rx causes */ 5417 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5418 ivar = IXGBE_READ_REG(hw, 5419 IXGBE_IVAR(intr_alloc_entry >> 1)); 5420 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5421 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5422 ivar); 5423 } 5424 break; 5425 5426 default: 5427 break; 5428 } 5429 } 5430 5431 /* 5432 * Convert the driver-maintained rx ring index to the corresponding 5433 * rx ring index in h/w.
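 *
 * For example, in plain VMDq mode on 82599-class hardware each pool
 * owns a pair of queues, so software ring i uses hardware ring 2 * i;
 * in VMDq+RSS mode the stride depends on the number of rx groups, as
 * computed below.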
5434 */ 5435 static uint32_t 5436 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index) 5437 { 5438 5439 struct ixgbe_hw *hw = &ixgbe->hw; 5440 uint32_t rx_ring_per_group, hw_rx_index; 5441 5442 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS || 5443 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) { 5444 return (sw_rx_index); 5445 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) { 5446 switch (hw->mac.type) { 5447 case ixgbe_mac_82598EB: 5448 return (sw_rx_index); 5449 5450 case ixgbe_mac_82599EB: 5451 case ixgbe_mac_X540: 5452 case ixgbe_mac_X550: 5453 case ixgbe_mac_X550EM_x: 5454 return (sw_rx_index * 2); 5455 5456 default: 5457 break; 5458 } 5459 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) { 5460 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5461 5462 switch (hw->mac.type) { 5463 case ixgbe_mac_82598EB: 5464 hw_rx_index = (sw_rx_index / rx_ring_per_group) * 5465 16 + (sw_rx_index % rx_ring_per_group); 5466 return (hw_rx_index); 5467 5468 case ixgbe_mac_82599EB: 5469 case ixgbe_mac_X540: 5470 case ixgbe_mac_X550: 5471 case ixgbe_mac_X550EM_x: 5472 if (ixgbe->num_rx_groups > 32) { 5473 hw_rx_index = (sw_rx_index / 5474 rx_ring_per_group) * 2 + 5475 (sw_rx_index % rx_ring_per_group); 5476 } else { 5477 hw_rx_index = (sw_rx_index / 5478 rx_ring_per_group) * 4 + 5479 (sw_rx_index % rx_ring_per_group); 5480 } 5481 return (hw_rx_index); 5482 5483 default: 5484 break; 5485 } 5486 } 5487 5488 /* 5489 * Should never be reached; this just keeps the compiler happy. 5490 */ 5491 return (sw_rx_index); 5492 } 5493 5494 /* 5495 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors. 5496 * 5497 * For MSI-X, this maps the rx, tx and other interrupts 5498 * to vector[0 .. (intr_cnt - 1)]. 5499 */ 5500 static int 5501 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe) 5502 { 5503 int i, vector = 0; 5504 5505 /* initialize vector map */ 5506 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 5507 for (i = 0; i < ixgbe->intr_cnt; i++) { 5508 ixgbe->vect_map[i].ixgbe = ixgbe; 5509 } 5510 5511 /* 5512 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0], 5513 * tx rings[0] on RTxQ[1]. 5514 */ 5515 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5516 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 5517 ixgbe_map_txring_to_vector(ixgbe, 0, 1); 5518 return (IXGBE_SUCCESS); 5519 } 5520 5521 /* 5522 * Interrupts/vectors mapping for MSI-X 5523 */ 5524 5525 /* 5526 * Map the other interrupt to vector 0; 5527 * set its bit in the map and count the bits set. 5528 */ 5529 BT_SET(ixgbe->vect_map[vector].other_map, 0); 5530 ixgbe->vect_map[vector].other_cnt++; 5531 5532 /* 5533 * Map rx ring interrupts to vectors 5534 */ 5535 for (i = 0; i < ixgbe->num_rx_rings; i++) { 5536 ixgbe_map_rxring_to_vector(ixgbe, i, vector); 5537 vector = (vector + 1) % ixgbe->intr_cnt; 5538 } 5539 5540 /* 5541 * Map tx ring interrupts to vectors 5542 */ 5543 for (i = 0; i < ixgbe->num_tx_rings; i++) { 5544 ixgbe_map_txring_to_vector(ixgbe, i, vector); 5545 vector = (vector + 1) % ixgbe->intr_cnt; 5546 } 5547 5548 return (IXGBE_SUCCESS); 5549 } 5550 5551 /* 5552 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
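 * The IVAR tables are cleared first and then repopulated from the
 * per-vector rx/tx bitmaps; for MSI-X the "other" cause always lives
 * on vector 0.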
5553 * 5554 * This relies on ring/vector mapping already set up in the 5555 * vect_map[] structures 5556 */ 5557 static void 5558 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 5559 { 5560 struct ixgbe_hw *hw = &ixgbe->hw; 5561 ixgbe_intr_vector_t *vect; /* vector bitmap */ 5562 int r_idx; /* ring index */ 5563 int v_idx; /* vector index */ 5564 uint32_t hw_index; 5565 5566 /* 5567 * Clear any previous entries 5568 */ 5569 switch (hw->mac.type) { 5570 case ixgbe_mac_82598EB: 5571 for (v_idx = 0; v_idx < 25; v_idx++) 5572 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5573 break; 5574 5575 case ixgbe_mac_82599EB: 5576 case ixgbe_mac_X540: 5577 case ixgbe_mac_X550: 5578 case ixgbe_mac_X550EM_x: 5579 for (v_idx = 0; v_idx < 64; v_idx++) 5580 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5581 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0); 5582 break; 5583 5584 default: 5585 break; 5586 } 5587 5588 /* 5589 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and 5590 * tx rings[0] will use RTxQ[1]. 5591 */ 5592 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5593 ixgbe_setup_ivar(ixgbe, 0, 0, 0); 5594 ixgbe_setup_ivar(ixgbe, 0, 1, 1); 5595 return; 5596 } 5597 5598 /* 5599 * For MSI-X interrupt, "Other" is always on vector[0]. 5600 */ 5601 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1); 5602 5603 /* 5604 * For each interrupt vector, populate the IVAR table 5605 */ 5606 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 5607 vect = &ixgbe->vect_map[v_idx]; 5608 5609 /* 5610 * For each rx ring bit set 5611 */ 5612 r_idx = bt_getlowbit(vect->rx_map, 0, 5613 (ixgbe->num_rx_rings - 1)); 5614 5615 while (r_idx >= 0) { 5616 hw_index = ixgbe->rx_rings[r_idx].hw_index; 5617 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0); 5618 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 5619 (ixgbe->num_rx_rings - 1)); 5620 } 5621 5622 /* 5623 * For each tx ring bit set 5624 */ 5625 r_idx = bt_getlowbit(vect->tx_map, 0, 5626 (ixgbe->num_tx_rings - 1)); 5627 5628 while (r_idx >= 0) { 5629 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1); 5630 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 5631 (ixgbe->num_tx_rings - 1)); 5632 } 5633 } 5634 } 5635 5636 /* 5637 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 5638 */ 5639 static void 5640 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 5641 { 5642 int i; 5643 int rc; 5644 5645 for (i = 0; i < ixgbe->intr_cnt; i++) { 5646 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 5647 if (rc != DDI_SUCCESS) { 5648 IXGBE_DEBUGLOG_1(ixgbe, 5649 "Remove intr handler failed: %d", rc); 5650 } 5651 } 5652 } 5653 5654 /* 5655 * ixgbe_rem_intrs - Remove the allocated interrupts. 5656 */ 5657 static void 5658 ixgbe_rem_intrs(ixgbe_t *ixgbe) 5659 { 5660 int i; 5661 int rc; 5662 5663 for (i = 0; i < ixgbe->intr_cnt; i++) { 5664 rc = ddi_intr_free(ixgbe->htable[i]); 5665 if (rc != DDI_SUCCESS) { 5666 IXGBE_DEBUGLOG_1(ixgbe, 5667 "Free intr failed: %d", rc); 5668 } 5669 } 5670 5671 kmem_free(ixgbe->htable, ixgbe->intr_size); 5672 ixgbe->htable = NULL; 5673 } 5674 5675 /* 5676 * ixgbe_enable_intrs - Enable all the ddi interrupts. 
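 *
 * ddi_intr_block_enable() is used when the implementation supports
 * block enabling (DDI_INTR_FLAG_BLOCK); otherwise each handle is
 * enabled individually with ddi_intr_enable().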
5677 */ 5678 static int 5679 ixgbe_enable_intrs(ixgbe_t *ixgbe) 5680 { 5681 int i; 5682 int rc; 5683 5684 /* 5685 * Enable interrupts 5686 */ 5687 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5688 /* 5689 * Call ddi_intr_block_enable() for MSI 5690 */ 5691 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 5692 if (rc != DDI_SUCCESS) { 5693 ixgbe_log(ixgbe, 5694 "Enable block intr failed: %d", rc); 5695 return (IXGBE_FAILURE); 5696 } 5697 } else { 5698 /* 5699 * Call ddi_intr_enable() for Legacy/MSI non block enable 5700 */ 5701 for (i = 0; i < ixgbe->intr_cnt; i++) { 5702 rc = ddi_intr_enable(ixgbe->htable[i]); 5703 if (rc != DDI_SUCCESS) { 5704 ixgbe_log(ixgbe, 5705 "Enable intr failed: %d", rc); 5706 return (IXGBE_FAILURE); 5707 } 5708 } 5709 } 5710 5711 return (IXGBE_SUCCESS); 5712 } 5713 5714 /* 5715 * ixgbe_disable_intrs - Disable all the interrupts. 5716 */ 5717 static int 5718 ixgbe_disable_intrs(ixgbe_t *ixgbe) 5719 { 5720 int i; 5721 int rc; 5722 5723 /* 5724 * Disable all interrupts 5725 */ 5726 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5727 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 5728 if (rc != DDI_SUCCESS) { 5729 ixgbe_log(ixgbe, 5730 "Disable block intr failed: %d", rc); 5731 return (IXGBE_FAILURE); 5732 } 5733 } else { 5734 for (i = 0; i < ixgbe->intr_cnt; i++) { 5735 rc = ddi_intr_disable(ixgbe->htable[i]); 5736 if (rc != DDI_SUCCESS) { 5737 ixgbe_log(ixgbe, 5738 "Disable intr failed: %d", rc); 5739 return (IXGBE_FAILURE); 5740 } 5741 } 5742 } 5743 5744 return (IXGBE_SUCCESS); 5745 } 5746 5747 /* 5748 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 5749 */ 5750 static void 5751 ixgbe_get_hw_state(ixgbe_t *ixgbe) 5752 { 5753 struct ixgbe_hw *hw = &ixgbe->hw; 5754 ixgbe_link_speed speed = 0; 5755 boolean_t link_up = B_FALSE; 5756 uint32_t pcs1g_anlp = 0; 5757 5758 ASSERT(mutex_owned(&ixgbe->gen_lock)); 5759 ixgbe->param_lp_1000fdx_cap = 0; 5760 ixgbe->param_lp_100fdx_cap = 0; 5761 5762 /* check for link, don't wait */ 5763 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 5764 5765 /* 5766 * Update the observed Link Partner's capabilities. Not all adapters 5767 * can provide full information on the LP's capable speeds, so we 5768 * provide what we can. 5769 */ 5770 if (link_up) { 5771 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 5772 5773 ixgbe->param_lp_1000fdx_cap = 5774 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5775 ixgbe->param_lp_100fdx_cap = 5776 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5777 } 5778 5779 /* 5780 * Update GLD's notion of the adapter's currently advertised speeds. 5781 * Since the common code doesn't always record the current autonegotiate 5782 * settings in the phy struct for all parts (specifically, adapters with 5783 * SFPs) we first test to see if it is 0, and if so, we fall back to 5784 * using the adapter's speed capabilities which we saved during instance 5785 * init in ixgbe_init_params(). 5786 * 5787 * Adapters with SFPs will always be shown as advertising all of their 5788 * supported speeds, and adapters with baseT PHYs (where the phy struct 5789 * is maintained by the common code) will always have a factual view of 5790 * their currently-advertised speeds. In the case of SFPs, this is 5791 * acceptable as we default to advertising all speeds that the adapter 5792 * claims to support, and those properties are immutable; unlike on 5793 * baseT (copper) PHYs, where speeds can be enabled or disabled at will. 
5794 */ 5795 speed = hw->phy.autoneg_advertised; 5796 if (speed == 0) 5797 speed = ixgbe->speeds_supported; 5798 5799 ixgbe->param_adv_10000fdx_cap = 5800 (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0; 5801 ixgbe->param_adv_5000fdx_cap = 5802 (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0; 5803 ixgbe->param_adv_2500fdx_cap = 5804 (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0; 5805 ixgbe->param_adv_1000fdx_cap = 5806 (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0; 5807 ixgbe->param_adv_100fdx_cap = 5808 (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0; 5809 } 5810 5811 /* 5812 * ixgbe_get_driver_control - Notify that driver is in control of device. 5813 */ 5814 static void 5815 ixgbe_get_driver_control(struct ixgbe_hw *hw) 5816 { 5817 uint32_t ctrl_ext; 5818 5819 /* 5820 * Notify firmware that driver is in control of device 5821 */ 5822 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5823 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 5824 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5825 } 5826 5827 /* 5828 * ixgbe_release_driver_control - Notify that driver is no longer in control 5829 * of device. 5830 */ 5831 static void 5832 ixgbe_release_driver_control(struct ixgbe_hw *hw) 5833 { 5834 uint32_t ctrl_ext; 5835 5836 /* 5837 * Notify firmware that driver is no longer in control of device 5838 */ 5839 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5840 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 5841 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5842 } 5843 5844 /* 5845 * ixgbe_atomic_reserve - Atomic decrease operation. 5846 */ 5847 int 5848 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 5849 { 5850 uint32_t oldval; 5851 uint32_t newval; 5852 5853 /* 5854 * ATOMICALLY 5855 */ 5856 do { 5857 oldval = *count_p; 5858 if (oldval < n) 5859 return (-1); 5860 newval = oldval - n; 5861 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 5862 5863 return (newval); 5864 } 5865 5866 /* 5867 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 5868 */ 5869 static uint8_t * 5870 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 5871 { 5872 uint8_t *addr = *upd_ptr; 5873 uint8_t *new_ptr; 5874 5875 _NOTE(ARGUNUSED(hw)); 5876 _NOTE(ARGUNUSED(vmdq)); 5877 5878 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 5879 *upd_ptr = new_ptr; 5880 return (addr); 5881 } 5882 5883 /* 5884 * FMA support 5885 */ 5886 int 5887 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 5888 { 5889 ddi_fm_error_t de; 5890 5891 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5892 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 5893 return (de.fme_status); 5894 } 5895 5896 int 5897 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 5898 { 5899 ddi_fm_error_t de; 5900 5901 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5902 return (de.fme_status); 5903 } 5904 5905 /* 5906 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 5907 */ 5908 static int 5909 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5910 { 5911 _NOTE(ARGUNUSED(impl_data)); 5912 /* 5913 * as the driver can always deal with an error in any dma or 5914 * access handle, we can just return the fme_status value. 
5915 */ 5916 pci_ereport_post(dip, err, NULL); 5917 return (err->fme_status); 5918 } 5919 5920 static void 5921 ixgbe_fm_init(ixgbe_t *ixgbe) 5922 { 5923 ddi_iblock_cookie_t iblk; 5924 int fma_dma_flag; 5925 5926 /* 5927 * Only register with IO Fault Services if we have some capability 5928 */ 5929 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 5930 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5931 } else { 5932 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5933 } 5934 5935 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 5936 fma_dma_flag = 1; 5937 } else { 5938 fma_dma_flag = 0; 5939 } 5940 5941 ixgbe_set_fma_flags(fma_dma_flag); 5942 5943 if (ixgbe->fm_capabilities) { 5944 5945 /* 5946 * Register capabilities with IO Fault Services 5947 */ 5948 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 5949 5950 /* 5951 * Initialize pci ereport capabilities if ereport capable 5952 */ 5953 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5954 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5955 pci_ereport_setup(ixgbe->dip); 5956 5957 /* 5958 * Register error callback if error callback capable 5959 */ 5960 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5961 ddi_fm_handler_register(ixgbe->dip, 5962 ixgbe_fm_error_cb, (void *)ixgbe); 5963 } 5964 } 5965 5966 static void 5967 ixgbe_fm_fini(ixgbe_t *ixgbe) 5968 { 5969 /* 5970 * Only unregister FMA capabilities if they are registered 5971 */ 5972 if (ixgbe->fm_capabilities) { 5973 5974 /* 5975 * Release any resources allocated by pci_ereport_setup() 5976 */ 5977 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5978 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5979 pci_ereport_teardown(ixgbe->dip); 5980 5981 /* 5982 * Un-register error callback if error callback capable 5983 */ 5984 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5985 ddi_fm_handler_unregister(ixgbe->dip); 5986 5987 /* 5988 * Unregister from IO Fault Service 5989 */ 5990 ddi_fm_fini(ixgbe->dip); 5991 } 5992 } 5993 5994 void 5995 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 5996 { 5997 uint64_t ena; 5998 char buf[FM_MAX_CLASS]; 5999 6000 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6001 ena = fm_ena_generate(0, FM_ENA_FMT1); 6002 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 6003 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 6004 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 6005 } 6006 } 6007 6008 static int 6009 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 6010 { 6011 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh; 6012 6013 mutex_enter(&rx_ring->rx_lock); 6014 rx_ring->ring_gen_num = mr_gen_num; 6015 mutex_exit(&rx_ring->rx_lock); 6016 return (0); 6017 } 6018 6019 /* 6020 * Get the global ring index from a group index and a ring index within 6021 * the group. 6022 */ static int 6023 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex) 6024 { 6025 ixgbe_rx_ring_t *rx_ring; 6026 int i; 6027 6028 for (i = 0; i < ixgbe->num_rx_rings; i++) { 6029 rx_ring = &ixgbe->rx_rings[i]; 6030 if (rx_ring->group_index == gindex) 6031 rindex--; 6032 if (rindex < 0) 6033 return (i); 6034 } 6035 6036 return (-1); 6037 } 6038 6039 /* 6040 * Callback function for the MAC layer to register all rings.
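 *
 * For an rx ring the poll, stat and interrupt enable/disable entry
 * points are exported; for a tx ring the transmit and stat entry
 * points are exported.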
6041 */ 6042 /* ARGSUSED */ 6043 void 6044 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index, 6045 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 6046 { 6047 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6048 mac_intr_t *mintr = &infop->mri_intr; 6049 6050 switch (rtype) { 6051 case MAC_RING_TYPE_RX: { 6052 /* 6053 * 'ring_index' is the ring index within the group. 6054 * Need to search the groups to get the global ring index. 6055 */ 6056 int global_ring_index = ixgbe_get_rx_ring_index( 6057 ixgbe, group_index, ring_index); 6058 6059 ASSERT(global_ring_index >= 0); 6060 6061 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index]; 6062 rx_ring->ring_handle = rh; 6063 6064 infop->mri_driver = (mac_ring_driver_t)rx_ring; 6065 infop->mri_start = ixgbe_ring_start; 6066 infop->mri_stop = NULL; 6067 infop->mri_poll = ixgbe_ring_rx_poll; 6068 infop->mri_stat = ixgbe_rx_ring_stat; 6069 6070 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 6071 mintr->mi_enable = ixgbe_rx_ring_intr_enable; 6072 mintr->mi_disable = ixgbe_rx_ring_intr_disable; 6073 if (ixgbe->intr_type & 6074 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6075 mintr->mi_ddi_handle = 6076 ixgbe->htable[rx_ring->intr_vector]; 6077 } 6078 6079 break; 6080 } 6081 case MAC_RING_TYPE_TX: { 6082 ASSERT(group_index == -1); 6083 ASSERT(ring_index < ixgbe->num_tx_rings); 6084 6085 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index]; 6086 tx_ring->ring_handle = rh; 6087 6088 infop->mri_driver = (mac_ring_driver_t)tx_ring; 6089 infop->mri_start = NULL; 6090 infop->mri_stop = NULL; 6091 infop->mri_tx = ixgbe_ring_tx; 6092 infop->mri_stat = ixgbe_tx_ring_stat; 6093 if (ixgbe->intr_type & 6094 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6095 mintr->mi_ddi_handle = 6096 ixgbe->htable[tx_ring->intr_vector]; 6097 } 6098 break; 6099 } 6100 default: 6101 break; 6102 } 6103 } 6104 6105 /* 6106 * Callback function for the MAC layer to register all groups. 6107 */ 6108 void 6109 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index, 6110 mac_group_info_t *infop, mac_group_handle_t gh) 6111 { 6112 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6113 6114 switch (rtype) { 6115 case MAC_RING_TYPE_RX: { 6116 ixgbe_rx_group_t *rx_group; 6117 6118 rx_group = &ixgbe->rx_groups[index]; 6119 rx_group->group_handle = gh; 6120 6121 infop->mgi_driver = (mac_group_driver_t)rx_group; 6122 infop->mgi_start = NULL; 6123 infop->mgi_stop = NULL; 6124 infop->mgi_addmac = ixgbe_addmac; 6125 infop->mgi_remmac = ixgbe_remmac; 6126 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups); 6127 6128 break; 6129 } 6130 case MAC_RING_TYPE_TX: 6131 break; 6132 default: 6133 break; 6134 } 6135 } 6136 6137 /* 6138 * Enable the interrupt on the specified rx ring. 6139 */ 6140 int 6141 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh) 6142 { 6143 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 6144 ixgbe_t *ixgbe = rx_ring->ixgbe; 6145 int r_idx = rx_ring->index; 6146 int hw_r_idx = rx_ring->hw_index; 6147 int v_idx = rx_ring->intr_vector; 6148 6149 mutex_enter(&ixgbe->gen_lock); 6150 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 6151 mutex_exit(&ixgbe->gen_lock); 6152 /* 6153 * Simply return 0. 6154 * Interrupts are being adjusted. ixgbe_intr_adjust() 6155 * will eventually re-enable the interrupt when it's 6156 * done with the adjustment. 6157 */ 6158 return (0); 6159 } 6160 6161 /* 6162 * Enable the interrupt by setting the VAL bit of the given 6163 * interrupt vector allocation register (IVAR).
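 *
 * The ring's bit is also set in the vector's rx_map, and an interrupt
 * is triggered through EICS so that packets which arrived while the
 * interrupt was disabled are picked up promptly.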
6164 */ 6165 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0); 6166 6167 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 6168 6169 /* 6170 * Trigger a Rx interrupt on this ring 6171 */ 6172 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx)); 6173 IXGBE_WRITE_FLUSH(&ixgbe->hw); 6174 6175 mutex_exit(&ixgbe->gen_lock); 6176 6177 return (0); 6178 } 6179 6180 /* 6181 * Disable the interrupt on the specified rx ring. 6182 */ 6183 int 6184 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh) 6185 { 6186 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 6187 ixgbe_t *ixgbe = rx_ring->ixgbe; 6188 int r_idx = rx_ring->index; 6189 int hw_r_idx = rx_ring->hw_index; 6190 int v_idx = rx_ring->intr_vector; 6191 6192 mutex_enter(&ixgbe->gen_lock); 6193 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 6194 mutex_exit(&ixgbe->gen_lock); 6195 /* 6196 * Simply return 0. 6197 * In the rare case where an interrupt is being 6198 * disabled while interrupts are being adjusted, 6199 * we don't fail the operation. No interrupts will 6200 * be generated while they are adjusted, and 6201 * ixgbe_intr_adjust() will cause the interrupts 6202 * to be re-enabled once it completes. Note that 6203 * in this case, packets may be delivered to the 6204 * stack via interrupts before ixgbe_rx_ring_intr_enable() 6205 * is called again. This is acceptable since interrupt 6206 * adjustment is infrequent, and the stack will be 6207 * able to handle these packets. 6208 */ 6209 return (0); 6210 } 6211 6212 /* 6213 * Disable the interrupt by clearing the VAL bit of the given 6214 * interrupt vector allocation register (IVAR). 6215 */ 6216 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0); 6217 6218 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx); 6219 6220 mutex_exit(&ixgbe->gen_lock); 6221 6222 return (0); 6223 } 6224 6225 /* 6226 * Add a mac address. 6227 */ 6228 static int 6229 ixgbe_addmac(void *arg, const uint8_t *mac_addr) 6230 { 6231 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 6232 ixgbe_t *ixgbe = rx_group->ixgbe; 6233 struct ixgbe_hw *hw = &ixgbe->hw; 6234 int slot, i; 6235 6236 mutex_enter(&ixgbe->gen_lock); 6237 6238 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6239 mutex_exit(&ixgbe->gen_lock); 6240 return (ECANCELED); 6241 } 6242 6243 if (ixgbe->unicst_avail == 0) { 6244 /* no slots available */ 6245 mutex_exit(&ixgbe->gen_lock); 6246 return (ENOSPC); 6247 } 6248 6249 /* 6250 * The first ixgbe->num_rx_groups slots are reserved, one for each 6251 * group. The remaining slots are shared by all groups. When adding 6252 * a MAC address, the group's reserved slot is checked first, then 6253 * the shared slots are searched. 6254 */ 6255 slot = -1; 6256 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) { 6257 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) { 6258 if (ixgbe->unicst_addr[i].mac.set == 0) { 6259 slot = i; 6260 break; 6261 } 6262 } 6263 } else { 6264 slot = rx_group->index; 6265 } 6266 6267 if (slot == -1) { 6268 /* no slots available */ 6269 mutex_exit(&ixgbe->gen_lock); 6270 return (ENOSPC); 6271 } 6272 6273 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 6274 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr, 6275 rx_group->index, IXGBE_RAH_AV); 6276 ixgbe->unicst_addr[slot].mac.set = 1; 6277 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index; 6278 ixgbe->unicst_avail--; 6279 6280 mutex_exit(&ixgbe->gen_lock); 6281 6282 return (0); 6283 } 6284 6285 /* 6286 * Remove a mac address.
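 *
 * The slot holding the address is located with ixgbe_unicst_find(),
 * the corresponding receive address register (RAR) is cleared, and
 * the slot is returned to the pool of available slots.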
6287 */ 6288 static int 6289 ixgbe_remmac(void *arg, const uint8_t *mac_addr) 6290 { 6291 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 6292 ixgbe_t *ixgbe = rx_group->ixgbe; 6293 struct ixgbe_hw *hw = &ixgbe->hw; 6294 int slot; 6295 6296 mutex_enter(&ixgbe->gen_lock); 6297 6298 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6299 mutex_exit(&ixgbe->gen_lock); 6300 return (ECANCELED); 6301 } 6302 6303 slot = ixgbe_unicst_find(ixgbe, mac_addr); 6304 if (slot == -1) { 6305 mutex_exit(&ixgbe->gen_lock); 6306 return (EINVAL); 6307 } 6308 6309 if (ixgbe->unicst_addr[slot].mac.set == 0) { 6310 mutex_exit(&ixgbe->gen_lock); 6311 return (EINVAL); 6312 } 6313 6314 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 6315 (void) ixgbe_clear_rar(hw, slot); 6316 ixgbe->unicst_addr[slot].mac.set = 0; 6317 ixgbe->unicst_avail++; 6318 6319 mutex_exit(&ixgbe->gen_lock); 6320 6321 return (0); 6322 } 6323