1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved. 24 */ 25 26 /* 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 28 * Copyright 2020 Joyent, Inc. 29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved. 30 * Copyright (c) 2013 Saso Kiselkov. All rights reserved. 31 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved. 32 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved. 33 * Copyright 2020 Oxide Computer Company 34 */ 35 36 #include "ixgbe_sw.h" 37 38 static char ixgbe_ident[] = "Intel 10Gb Ethernet"; 39 40 /* 41 * Local function protoypes 42 */ 43 static int ixgbe_register_mac(ixgbe_t *); 44 static int ixgbe_identify_hardware(ixgbe_t *); 45 static int ixgbe_regs_map(ixgbe_t *); 46 static void ixgbe_init_properties(ixgbe_t *); 47 static int ixgbe_init_driver_settings(ixgbe_t *); 48 static void ixgbe_init_locks(ixgbe_t *); 49 static void ixgbe_destroy_locks(ixgbe_t *); 50 static int ixgbe_init(ixgbe_t *); 51 static int ixgbe_chip_start(ixgbe_t *); 52 static void ixgbe_chip_stop(ixgbe_t *); 53 static int ixgbe_reset(ixgbe_t *); 54 static void ixgbe_tx_clean(ixgbe_t *); 55 static boolean_t ixgbe_tx_drain(ixgbe_t *); 56 static boolean_t ixgbe_rx_drain(ixgbe_t *); 57 static int ixgbe_alloc_rings(ixgbe_t *); 58 static void ixgbe_free_rings(ixgbe_t *); 59 static int ixgbe_alloc_rx_data(ixgbe_t *); 60 static void ixgbe_free_rx_data(ixgbe_t *); 61 static int ixgbe_setup_rings(ixgbe_t *); 62 static int ixgbe_setup_rx(ixgbe_t *); 63 static void ixgbe_setup_tx(ixgbe_t *); 64 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *); 65 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *); 66 static void ixgbe_setup_rss(ixgbe_t *); 67 static void ixgbe_setup_vmdq(ixgbe_t *); 68 static void ixgbe_setup_vmdq_rss(ixgbe_t *); 69 static void ixgbe_setup_rss_table(ixgbe_t *); 70 static void ixgbe_init_unicst(ixgbe_t *); 71 static int ixgbe_init_vlan(ixgbe_t *); 72 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *); 73 static void ixgbe_setup_multicst(ixgbe_t *); 74 static void ixgbe_get_hw_state(ixgbe_t *); 75 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe); 76 static void ixgbe_get_conf(ixgbe_t *); 77 static void ixgbe_init_params(ixgbe_t *); 78 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int); 79 static void ixgbe_driver_link_check(ixgbe_t *); 80 static void ixgbe_sfp_check(void *); 81 static void ixgbe_overtemp_check(void *); 82 static void ixgbe_phy_check(void *); 83 static void ixgbe_link_timer(void *); 84 static void ixgbe_local_timer(void *); 85 static void ixgbe_arm_watchdog_timer(ixgbe_t *); 86 static void ixgbe_restart_watchdog_timer(ixgbe_t *); 87 static void ixgbe_disable_adapter_interrupts(ixgbe_t *); 88 static void ixgbe_enable_adapter_interrupts(ixgbe_t *); 89 static boolean_t is_valid_mac_addr(uint8_t *); 90 static boolean_t ixgbe_stall_check(ixgbe_t *); 91 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t); 92 static void ixgbe_set_internal_mac_loopback(ixgbe_t *); 93 static boolean_t ixgbe_find_mac_address(ixgbe_t *); 94 static int ixgbe_alloc_intrs(ixgbe_t *); 95 static int ixgbe_alloc_intr_handles(ixgbe_t *, int); 96 static int ixgbe_add_intr_handlers(ixgbe_t *); 97 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int); 98 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int); 99 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t); 100 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t); 101 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t); 102 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index); 103 static int ixgbe_map_intrs_to_vectors(ixgbe_t *); 104 static void ixgbe_setup_adapter_vector(ixgbe_t *); 105 static void ixgbe_rem_intr_handlers(ixgbe_t *); 106 static void ixgbe_rem_intrs(ixgbe_t *); 107 static int ixgbe_enable_intrs(ixgbe_t *); 108 static int ixgbe_disable_intrs(ixgbe_t *); 109 static uint_t ixgbe_intr_legacy(void *, void *); 110 static uint_t ixgbe_intr_msi(void *, void *); 111 static uint_t ixgbe_intr_msix(void *, void *); 112 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *); 113 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *); 114 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t); 115 static void ixgbe_get_driver_control(struct ixgbe_hw *); 116 static int ixgbe_addmac(void *, const uint8_t *); 117 static int ixgbe_remmac(void *, const uint8_t *); 118 static int ixgbe_addvlan(mac_group_driver_t, uint16_t); 119 static int ixgbe_remvlan(mac_group_driver_t, uint16_t); 120 static void ixgbe_release_driver_control(struct ixgbe_hw *); 121 122 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t); 123 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t); 124 static int ixgbe_resume(dev_info_t *); 125 static int ixgbe_suspend(dev_info_t *); 126 static int ixgbe_quiesce(dev_info_t *); 127 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *); 128 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *); 129 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *); 130 static int ixgbe_intr_cb_register(ixgbe_t *); 131 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int); 132 133 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 134 const void *impl_data); 135 static void ixgbe_fm_init(ixgbe_t *); 136 static void ixgbe_fm_fini(ixgbe_t *); 137 static int ixgbe_ufm_fill_image(ddi_ufm_handle_t *, void *arg, uint_t, 138 ddi_ufm_image_t *); 139 static int ixgbe_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t, 140 ddi_ufm_slot_t *); 141 static int ixgbe_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *); 142 static int ixgbe_ufm_readimg(ddi_ufm_handle_t *, void *, uint_t, uint_t, 143 uint64_t, uint64_t, void *, uint64_t *); 144 145 char *ixgbe_priv_props[] = { 146 "_tx_copy_thresh", 147 "_tx_recycle_thresh", 148 "_tx_overload_thresh", 149 "_tx_resched_thresh", 150 "_rx_copy_thresh", 151 "_rx_limit_per_intr", 152 "_intr_throttling", 153 "_adv_pause_cap", 154 "_adv_asym_pause_cap", 155 NULL 156 }; 157 158 #define IXGBE_MAX_PRIV_PROPS \ 159 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t)) 160 161 static struct cb_ops ixgbe_cb_ops = { 162 nulldev, /* cb_open */ 163 nulldev, /* cb_close */ 164 nodev, /* cb_strategy */ 165 nodev, /* cb_print */ 166 nodev, /* cb_dump */ 167 nodev, /* cb_read */ 168 nodev, /* cb_write */ 169 nodev, /* cb_ioctl */ 170 nodev, /* cb_devmap */ 171 nodev, /* cb_mmap */ 172 nodev, /* cb_segmap */ 173 nochpoll, /* cb_chpoll */ 174 ddi_prop_op, /* cb_prop_op */ 175 NULL, /* cb_stream */ 176 D_MP | D_HOTPLUG, /* cb_flag */ 177 CB_REV, /* cb_rev */ 178 nodev, /* cb_aread */ 179 nodev /* cb_awrite */ 180 }; 181 182 static struct dev_ops ixgbe_dev_ops = { 183 DEVO_REV, /* devo_rev */ 184 0, /* devo_refcnt */ 185 NULL, /* devo_getinfo */ 186 nulldev, /* devo_identify */ 187 nulldev, /* devo_probe */ 188 ixgbe_attach, /* devo_attach */ 189 ixgbe_detach, /* devo_detach */ 190 nodev, /* devo_reset */ 191 &ixgbe_cb_ops, /* devo_cb_ops */ 192 NULL, /* devo_bus_ops */ 193 ddi_power, /* devo_power */ 194 ixgbe_quiesce, /* devo_quiesce */ 195 }; 196 197 static struct modldrv ixgbe_modldrv = { 198 &mod_driverops, /* Type of module. This one is a driver */ 199 ixgbe_ident, /* Discription string */ 200 &ixgbe_dev_ops /* driver ops */ 201 }; 202 203 static struct modlinkage ixgbe_modlinkage = { 204 MODREV_1, &ixgbe_modldrv, NULL 205 }; 206 207 /* 208 * Access attributes for register mapping 209 */ 210 ddi_device_acc_attr_t ixgbe_regs_acc_attr = { 211 DDI_DEVICE_ATTR_V1, 212 DDI_STRUCTURE_LE_ACC, 213 DDI_STRICTORDER_ACC, 214 DDI_FLAGERR_ACC 215 }; 216 217 /* 218 * Loopback property 219 */ 220 static lb_property_t lb_normal = { 221 normal, "normal", IXGBE_LB_NONE 222 }; 223 224 static lb_property_t lb_mac = { 225 internal, "MAC", IXGBE_LB_INTERNAL_MAC 226 }; 227 228 static lb_property_t lb_external = { 229 external, "External", IXGBE_LB_EXTERNAL 230 }; 231 232 #define IXGBE_M_CALLBACK_FLAGS \ 233 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO) 234 235 static mac_callbacks_t ixgbe_m_callbacks = { 236 IXGBE_M_CALLBACK_FLAGS, 237 ixgbe_m_stat, 238 ixgbe_m_start, 239 ixgbe_m_stop, 240 ixgbe_m_promisc, 241 ixgbe_m_multicst, 242 NULL, 243 NULL, 244 NULL, 245 ixgbe_m_ioctl, 246 ixgbe_m_getcapab, 247 NULL, 248 NULL, 249 ixgbe_m_setprop, 250 ixgbe_m_getprop, 251 ixgbe_m_propinfo 252 }; 253 254 /* 255 * Initialize capabilities of each supported adapter type 256 */ 257 static adapter_info_t ixgbe_82598eb_cap = { 258 64, /* maximum number of rx queues */ 259 1, /* minimum number of rx queues */ 260 64, /* default number of rx queues */ 261 16, /* maximum number of rx groups */ 262 1, /* minimum number of rx groups */ 263 1, /* default number of rx groups */ 264 32, /* maximum number of tx queues */ 265 1, /* minimum number of tx queues */ 266 8, /* default number of tx queues */ 267 16366, /* maximum MTU size */ 268 0xFFFF, /* maximum interrupt throttle rate */ 269 0, /* minimum interrupt throttle rate */ 270 200, /* default interrupt throttle rate */ 271 18, /* maximum total msix vectors */ 272 16, /* maximum number of ring vectors */ 273 2, /* maximum number of other vectors */ 274 IXGBE_EICR_LSC, /* "other" interrupt types handled */ 275 0, /* "other" interrupt types enable mask */ 276 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */ 277 | IXGBE_FLAG_RSS_CAPABLE 278 | IXGBE_FLAG_VMDQ_CAPABLE) 279 }; 280 281 static adapter_info_t ixgbe_82599eb_cap = { 282 128, /* maximum number of rx queues */ 283 1, /* minimum number of rx queues */ 284 128, /* default number of rx queues */ 285 64, /* maximum number of rx groups */ 286 1, /* minimum number of rx groups */ 287 1, /* default number of rx groups */ 288 128, /* maximum number of tx queues */ 289 1, /* minimum number of tx queues */ 290 8, /* default number of tx queues */ 291 15500, /* maximum MTU size */ 292 0xFF8, /* maximum interrupt throttle rate */ 293 0, /* minimum interrupt throttle rate */ 294 200, /* default interrupt throttle rate */ 295 64, /* maximum total msix vectors */ 296 16, /* maximum number of ring vectors */ 297 2, /* maximum number of other vectors */ 298 (IXGBE_EICR_LSC 299 | IXGBE_EICR_GPI_SDP1 300 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */ 301 302 (IXGBE_SDP1_GPIEN 303 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */ 304 305 (IXGBE_FLAG_DCA_CAPABLE 306 | IXGBE_FLAG_RSS_CAPABLE 307 | IXGBE_FLAG_VMDQ_CAPABLE 308 | IXGBE_FLAG_RSC_CAPABLE 309 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */ 310 }; 311 312 static adapter_info_t ixgbe_X540_cap = { 313 128, /* maximum number of rx queues */ 314 1, /* minimum number of rx queues */ 315 128, /* default number of rx queues */ 316 64, /* maximum number of rx groups */ 317 1, /* minimum number of rx groups */ 318 1, /* default number of rx groups */ 319 128, /* maximum number of tx queues */ 320 1, /* minimum number of tx queues */ 321 8, /* default number of tx queues */ 322 15500, /* maximum MTU size */ 323 0xFF8, /* maximum interrupt throttle rate */ 324 0, /* minimum interrupt throttle rate */ 325 200, /* default interrupt throttle rate */ 326 64, /* maximum total msix vectors */ 327 16, /* maximum number of ring vectors */ 328 2, /* maximum number of other vectors */ 329 (IXGBE_EICR_LSC 330 | IXGBE_EICR_GPI_SDP1_X540 331 | IXGBE_EICR_GPI_SDP2_X540), /* "other" interrupt types handled */ 332 333 (IXGBE_SDP1_GPIEN_X540 334 | IXGBE_SDP2_GPIEN_X540), /* "other" interrupt types enable mask */ 335 336 (IXGBE_FLAG_DCA_CAPABLE 337 | IXGBE_FLAG_RSS_CAPABLE 338 | IXGBE_FLAG_VMDQ_CAPABLE 339 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */ 340 }; 341 342 static adapter_info_t ixgbe_X550_cap = { 343 128, /* maximum number of rx queues */ 344 1, /* minimum number of rx queues */ 345 128, /* default number of rx queues */ 346 64, /* maximum number of rx groups */ 347 1, /* minimum number of rx groups */ 348 1, /* default number of rx groups */ 349 128, /* maximum number of tx queues */ 350 1, /* minimum number of tx queues */ 351 8, /* default number of tx queues */ 352 15500, /* maximum MTU size */ 353 0xFF8, /* maximum interrupt throttle rate */ 354 0, /* minimum interrupt throttle rate */ 355 0x200, /* default interrupt throttle rate */ 356 64, /* maximum total msix vectors */ 357 16, /* maximum number of ring vectors */ 358 2, /* maximum number of other vectors */ 359 IXGBE_EICR_LSC, /* "other" interrupt types handled */ 360 0, /* "other" interrupt types enable mask */ 361 (IXGBE_FLAG_RSS_CAPABLE 362 | IXGBE_FLAG_VMDQ_CAPABLE 363 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */ 364 }; 365 366 static ddi_ufm_ops_t ixgbe_ufm_ops = { 367 .ddi_ufm_op_fill_image = ixgbe_ufm_fill_image, 368 .ddi_ufm_op_fill_slot = ixgbe_ufm_fill_slot, 369 .ddi_ufm_op_getcaps = ixgbe_ufm_getcaps, 370 .ddi_ufm_op_readimg = ixgbe_ufm_readimg 371 }; 372 373 374 /* 375 * Module Initialization Functions. 376 */ 377 378 int 379 _init(void) 380 { 381 int status; 382 383 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME); 384 385 status = mod_install(&ixgbe_modlinkage); 386 387 if (status != DDI_SUCCESS) { 388 mac_fini_ops(&ixgbe_dev_ops); 389 } 390 391 return (status); 392 } 393 394 int 395 _fini(void) 396 { 397 int status; 398 399 status = mod_remove(&ixgbe_modlinkage); 400 401 if (status == DDI_SUCCESS) { 402 mac_fini_ops(&ixgbe_dev_ops); 403 } 404 405 return (status); 406 } 407 408 int 409 _info(struct modinfo *modinfop) 410 { 411 int status; 412 413 status = mod_info(&ixgbe_modlinkage, modinfop); 414 415 return (status); 416 } 417 418 /* 419 * ixgbe_attach - Driver attach. 420 * 421 * This function is the device specific initialization entry 422 * point. This entry point is required and must be written. 423 * The DDI_ATTACH command must be provided in the attach entry 424 * point. When attach() is called with cmd set to DDI_ATTACH, 425 * all normal kernel services (such as kmem_alloc(9F)) are 426 * available for use by the driver. 427 * 428 * The attach() function will be called once for each instance 429 * of the device on the system with cmd set to DDI_ATTACH. 430 * Until attach() succeeds, the only driver entry points which 431 * may be called are open(9E) and getinfo(9E). 432 */ 433 static int 434 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 435 { 436 ixgbe_t *ixgbe; 437 struct ixgbe_osdep *osdep; 438 struct ixgbe_hw *hw; 439 int instance; 440 char taskqname[32]; 441 442 /* 443 * Check the command and perform corresponding operations 444 */ 445 switch (cmd) { 446 default: 447 return (DDI_FAILURE); 448 449 case DDI_RESUME: 450 return (ixgbe_resume(devinfo)); 451 452 case DDI_ATTACH: 453 break; 454 } 455 456 /* Get the device instance */ 457 instance = ddi_get_instance(devinfo); 458 459 /* Allocate memory for the instance data structure */ 460 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP); 461 462 ixgbe->dip = devinfo; 463 ixgbe->instance = instance; 464 465 hw = &ixgbe->hw; 466 osdep = &ixgbe->osdep; 467 hw->back = osdep; 468 osdep->ixgbe = ixgbe; 469 470 /* Attach the instance pointer to the dev_info data structure */ 471 ddi_set_driver_private(devinfo, ixgbe); 472 473 /* 474 * Initialize for FMA support 475 */ 476 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE, 477 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 478 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 479 ixgbe_fm_init(ixgbe); 480 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT; 481 482 /* 483 * Map PCI config space registers 484 */ 485 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 486 ixgbe_error(ixgbe, "Failed to map PCI configurations"); 487 goto attach_fail; 488 } 489 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 490 491 /* 492 * Identify the chipset family 493 */ 494 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) { 495 ixgbe_error(ixgbe, "Failed to identify hardware"); 496 goto attach_fail; 497 } 498 499 /* 500 * Map device registers 501 */ 502 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) { 503 ixgbe_error(ixgbe, "Failed to map device registers"); 504 goto attach_fail; 505 } 506 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 507 508 /* 509 * Initialize driver parameters 510 */ 511 ixgbe_init_properties(ixgbe); 512 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS; 513 514 /* 515 * Register interrupt callback 516 */ 517 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) { 518 ixgbe_error(ixgbe, "Failed to register interrupt callback"); 519 goto attach_fail; 520 } 521 522 /* 523 * Allocate interrupts 524 */ 525 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) { 526 ixgbe_error(ixgbe, "Failed to allocate interrupts"); 527 goto attach_fail; 528 } 529 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 530 531 /* 532 * Allocate rx/tx rings based on the ring numbers. 533 * The actual numbers of rx/tx rings are decided by the number of 534 * allocated interrupt vectors, so we should allocate the rings after 535 * interrupts are allocated. 536 */ 537 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) { 538 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings"); 539 goto attach_fail; 540 } 541 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS; 542 543 /* 544 * Map rings to interrupt vectors 545 */ 546 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 547 ixgbe_error(ixgbe, "Failed to map interrupts to vectors"); 548 goto attach_fail; 549 } 550 551 /* 552 * Add interrupt handlers 553 */ 554 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 555 ixgbe_error(ixgbe, "Failed to add interrupt handlers"); 556 goto attach_fail; 557 } 558 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 559 560 /* 561 * Create a taskq for sfp-change 562 */ 563 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance); 564 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname, 565 1, TASKQ_DEFAULTPRI, 0)) == NULL) { 566 ixgbe_error(ixgbe, "sfp_taskq create failed"); 567 goto attach_fail; 568 } 569 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ; 570 571 /* 572 * Create a taskq for over-temp 573 */ 574 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance); 575 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname, 576 1, TASKQ_DEFAULTPRI, 0)) == NULL) { 577 ixgbe_error(ixgbe, "overtemp_taskq create failed"); 578 goto attach_fail; 579 } 580 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ; 581 582 /* 583 * Create a taskq for processing external PHY interrupts 584 */ 585 (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance); 586 if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname, 587 1, TASKQ_DEFAULTPRI, 0)) == NULL) { 588 ixgbe_error(ixgbe, "phy_taskq create failed"); 589 goto attach_fail; 590 } 591 ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ; 592 593 /* 594 * Initialize driver parameters 595 */ 596 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) { 597 ixgbe_error(ixgbe, "Failed to initialize driver settings"); 598 goto attach_fail; 599 } 600 601 /* 602 * Initialize mutexes for this device. 603 * Do this before enabling the interrupt handler and 604 * register the softint to avoid the condition where 605 * interrupt handler can try using uninitialized mutex. 606 */ 607 ixgbe_init_locks(ixgbe); 608 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS; 609 610 /* 611 * Initialize chipset hardware 612 */ 613 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) { 614 ixgbe_error(ixgbe, "Failed to initialize adapter"); 615 goto attach_fail; 616 } 617 ixgbe->link_check_complete = B_FALSE; 618 ixgbe->link_check_hrtime = gethrtime() + 619 (IXGBE_LINK_UP_TIME * 100000000ULL); 620 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT; 621 622 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) { 623 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 624 goto attach_fail; 625 } 626 627 /* 628 * Initialize adapter capabilities 629 */ 630 ixgbe_init_params(ixgbe); 631 632 /* 633 * Initialize statistics 634 */ 635 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) { 636 ixgbe_error(ixgbe, "Failed to initialize statistics"); 637 goto attach_fail; 638 } 639 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS; 640 641 /* 642 * Register the driver to the MAC 643 */ 644 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) { 645 ixgbe_error(ixgbe, "Failed to register MAC"); 646 goto attach_fail; 647 } 648 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); 649 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC; 650 651 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe, 652 IXGBE_CYCLIC_PERIOD, DDI_IPL_0); 653 if (ixgbe->periodic_id == 0) { 654 ixgbe_error(ixgbe, "Failed to add the link check timer"); 655 goto attach_fail; 656 } 657 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER; 658 659 /* 660 * Now that mutex locks are initialized, and the chip is also 661 * initialized, enable interrupts. 662 */ 663 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 664 ixgbe_error(ixgbe, "Failed to enable DDI interrupts"); 665 goto attach_fail; 666 } 667 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 668 669 if (ixgbe->hw.bus.func == 0) { 670 if (ddi_ufm_init(devinfo, DDI_UFM_CURRENT_VERSION, 671 &ixgbe_ufm_ops, &ixgbe->ixgbe_ufmh, ixgbe) != 0) { 672 ixgbe_error(ixgbe, "Failed to enable DDI UFM support"); 673 goto attach_fail; 674 } 675 ixgbe->attach_progress |= ATTACH_PROGRESS_UFM; 676 ddi_ufm_update(ixgbe->ixgbe_ufmh); 677 } 678 679 ixgbe_log(ixgbe, "%s", ixgbe_ident); 680 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED); 681 682 return (DDI_SUCCESS); 683 684 attach_fail: 685 ixgbe_unconfigure(devinfo, ixgbe); 686 return (DDI_FAILURE); 687 } 688 689 /* 690 * ixgbe_detach - Driver detach. 691 * 692 * The detach() function is the complement of the attach routine. 693 * If cmd is set to DDI_DETACH, detach() is used to remove the 694 * state associated with a given instance of a device node 695 * prior to the removal of that instance from the system. 696 * 697 * The detach() function will be called once for each instance 698 * of the device for which there has been a successful attach() 699 * once there are no longer any opens on the device. 700 * 701 * Interrupts routine are disabled, All memory allocated by this 702 * driver are freed. 703 */ 704 static int 705 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 706 { 707 ixgbe_t *ixgbe; 708 709 /* 710 * Check detach command 711 */ 712 switch (cmd) { 713 default: 714 return (DDI_FAILURE); 715 716 case DDI_SUSPEND: 717 return (ixgbe_suspend(devinfo)); 718 719 case DDI_DETACH: 720 break; 721 } 722 723 /* 724 * Get the pointer to the driver private data structure 725 */ 726 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 727 if (ixgbe == NULL) 728 return (DDI_FAILURE); 729 730 /* 731 * If the device is still running, it needs to be stopped first. 732 * This check is necessary because under some specific circumstances, 733 * the detach routine can be called without stopping the interface 734 * first. 735 */ 736 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 737 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED); 738 mutex_enter(&ixgbe->gen_lock); 739 ixgbe_stop(ixgbe, B_TRUE); 740 mutex_exit(&ixgbe->gen_lock); 741 /* Disable and stop the watchdog timer */ 742 ixgbe_disable_watchdog_timer(ixgbe); 743 } 744 745 /* 746 * Check if there are still rx buffers held by the upper layer. 747 * If so, fail the detach. 748 */ 749 if (!ixgbe_rx_drain(ixgbe)) 750 return (DDI_FAILURE); 751 752 /* 753 * Do the remaining unconfigure routines 754 */ 755 ixgbe_unconfigure(devinfo, ixgbe); 756 757 return (DDI_SUCCESS); 758 } 759 760 /* 761 * quiesce(9E) entry point. 762 * 763 * This function is called when the system is single-threaded at high 764 * PIL with preemption disabled. Therefore, this function must not be 765 * blocked. 766 * 767 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 768 * DDI_FAILURE indicates an error condition and should almost never happen. 769 */ 770 static int 771 ixgbe_quiesce(dev_info_t *devinfo) 772 { 773 ixgbe_t *ixgbe; 774 struct ixgbe_hw *hw; 775 776 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 777 778 if (ixgbe == NULL) 779 return (DDI_FAILURE); 780 781 hw = &ixgbe->hw; 782 783 /* 784 * Disable the adapter interrupts 785 */ 786 ixgbe_disable_adapter_interrupts(ixgbe); 787 788 /* 789 * Tell firmware driver is no longer in control 790 */ 791 ixgbe_release_driver_control(hw); 792 793 /* 794 * Reset the chipset 795 */ 796 (void) ixgbe_reset_hw(hw); 797 798 /* 799 * Reset PHY 800 */ 801 (void) ixgbe_reset_phy(hw); 802 803 return (DDI_SUCCESS); 804 } 805 806 static void 807 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe) 808 { 809 /* 810 * Disable interrupt 811 */ 812 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 813 (void) ixgbe_disable_intrs(ixgbe); 814 } 815 816 /* 817 * remove the link check timer 818 */ 819 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) { 820 if (ixgbe->periodic_id != NULL) { 821 ddi_periodic_delete(ixgbe->periodic_id); 822 ixgbe->periodic_id = NULL; 823 } 824 } 825 826 /* 827 * Clean up the UFM subsystem. Note this only is set on function 0. 828 */ 829 if (ixgbe->attach_progress & ATTACH_PROGRESS_UFM) { 830 ddi_ufm_fini(ixgbe->ixgbe_ufmh); 831 } 832 833 /* 834 * Unregister MAC 835 */ 836 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) { 837 (void) mac_unregister(ixgbe->mac_hdl); 838 } 839 840 /* 841 * Free statistics 842 */ 843 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) { 844 kstat_delete((kstat_t *)ixgbe->ixgbe_ks); 845 } 846 847 /* 848 * Remove interrupt handlers 849 */ 850 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 851 ixgbe_rem_intr_handlers(ixgbe); 852 } 853 854 /* 855 * Remove taskq for sfp-status-change 856 */ 857 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) { 858 ddi_taskq_destroy(ixgbe->sfp_taskq); 859 } 860 861 /* 862 * Remove taskq for over-temp 863 */ 864 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) { 865 ddi_taskq_destroy(ixgbe->overtemp_taskq); 866 } 867 868 /* 869 * Remove taskq for external PHYs 870 */ 871 if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) { 872 ddi_taskq_destroy(ixgbe->phy_taskq); 873 } 874 875 /* 876 * Remove interrupts 877 */ 878 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) { 879 ixgbe_rem_intrs(ixgbe); 880 } 881 882 /* 883 * Unregister interrupt callback handler 884 */ 885 if (ixgbe->cb_hdl != NULL) { 886 (void) ddi_cb_unregister(ixgbe->cb_hdl); 887 } 888 889 /* 890 * Remove driver properties 891 */ 892 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) { 893 (void) ddi_prop_remove_all(devinfo); 894 } 895 896 /* 897 * Stop the chipset 898 */ 899 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) { 900 mutex_enter(&ixgbe->gen_lock); 901 ixgbe_chip_stop(ixgbe); 902 mutex_exit(&ixgbe->gen_lock); 903 } 904 905 /* 906 * Free register handle 907 */ 908 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 909 if (ixgbe->osdep.reg_handle != NULL) 910 ddi_regs_map_free(&ixgbe->osdep.reg_handle); 911 } 912 913 /* 914 * Free PCI config handle 915 */ 916 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 917 if (ixgbe->osdep.cfg_handle != NULL) 918 pci_config_teardown(&ixgbe->osdep.cfg_handle); 919 } 920 921 /* 922 * Free locks 923 */ 924 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) { 925 ixgbe_destroy_locks(ixgbe); 926 } 927 928 /* 929 * Free the rx/tx rings 930 */ 931 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) { 932 ixgbe_free_rings(ixgbe); 933 } 934 935 /* 936 * Unregister FMA capabilities 937 */ 938 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) { 939 ixgbe_fm_fini(ixgbe); 940 } 941 942 /* 943 * Free the driver data structure 944 */ 945 kmem_free(ixgbe, sizeof (ixgbe_t)); 946 947 ddi_set_driver_private(devinfo, NULL); 948 } 949 950 /* 951 * ixgbe_register_mac - Register the driver and its function pointers with 952 * the GLD interface. 953 */ 954 static int 955 ixgbe_register_mac(ixgbe_t *ixgbe) 956 { 957 struct ixgbe_hw *hw = &ixgbe->hw; 958 mac_register_t *mac; 959 int status; 960 961 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 962 return (IXGBE_FAILURE); 963 964 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 965 mac->m_driver = ixgbe; 966 mac->m_dip = ixgbe->dip; 967 mac->m_src_addr = hw->mac.addr; 968 mac->m_callbacks = &ixgbe_m_callbacks; 969 mac->m_min_sdu = 0; 970 mac->m_max_sdu = ixgbe->default_mtu; 971 mac->m_margin = VLAN_TAGSZ; 972 mac->m_priv_props = ixgbe_priv_props; 973 mac->m_v12n = MAC_VIRT_LEVEL1; 974 975 status = mac_register(mac, &ixgbe->mac_hdl); 976 977 mac_free(mac); 978 979 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE); 980 } 981 982 /* 983 * ixgbe_identify_hardware - Identify the type of the chipset. 984 */ 985 static int 986 ixgbe_identify_hardware(ixgbe_t *ixgbe) 987 { 988 struct ixgbe_hw *hw = &ixgbe->hw; 989 struct ixgbe_osdep *osdep = &ixgbe->osdep; 990 991 /* 992 * Get the device id 993 */ 994 hw->vendor_id = 995 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 996 hw->device_id = 997 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 998 hw->revision_id = 999 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 1000 hw->subsystem_device_id = 1001 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 1002 hw->subsystem_vendor_id = 1003 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 1004 1005 /* 1006 * Set the mac type of the adapter based on the device id 1007 */ 1008 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) { 1009 return (IXGBE_FAILURE); 1010 } 1011 1012 /* 1013 * Install adapter capabilities 1014 */ 1015 switch (hw->mac.type) { 1016 case ixgbe_mac_82598EB: 1017 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n"); 1018 ixgbe->capab = &ixgbe_82598eb_cap; 1019 1020 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) { 1021 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 1022 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1; 1023 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN; 1024 } 1025 break; 1026 1027 case ixgbe_mac_82599EB: 1028 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n"); 1029 ixgbe->capab = &ixgbe_82599eb_cap; 1030 1031 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) { 1032 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE; 1033 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0; 1034 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN; 1035 } 1036 break; 1037 1038 case ixgbe_mac_X540: 1039 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n"); 1040 ixgbe->capab = &ixgbe_X540_cap; 1041 /* 1042 * For now, X540 is all set in its capab structure. 1043 * As other X540 variants show up, things can change here. 1044 */ 1045 break; 1046 1047 case ixgbe_mac_X550: 1048 case ixgbe_mac_X550EM_x: 1049 case ixgbe_mac_X550EM_a: 1050 IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n"); 1051 ixgbe->capab = &ixgbe_X550_cap; 1052 1053 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 1054 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 1055 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 1056 hw->device_id == IXGBE_DEV_ID_X550EM_A_QSFP || 1057 hw->device_id == IXGBE_DEV_ID_X550EM_A_QSFP_N) { 1058 ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE; 1059 } 1060 1061 /* 1062 * Link detection on X552 SFP+ and X552/X557-AT 1063 */ 1064 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 1065 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 1066 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 1067 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 1068 ixgbe->capab->other_intr |= 1069 IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 1070 } 1071 if (hw->phy.type == ixgbe_phy_x550em_ext_t) { 1072 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540; 1073 } 1074 break; 1075 1076 default: 1077 IXGBE_DEBUGLOG_1(ixgbe, 1078 "adapter not supported in ixgbe_identify_hardware(): %d\n", 1079 hw->mac.type); 1080 return (IXGBE_FAILURE); 1081 } 1082 1083 return (IXGBE_SUCCESS); 1084 } 1085 1086 /* 1087 * ixgbe_regs_map - Map the device registers. 1088 * 1089 */ 1090 static int 1091 ixgbe_regs_map(ixgbe_t *ixgbe) 1092 { 1093 dev_info_t *devinfo = ixgbe->dip; 1094 struct ixgbe_hw *hw = &ixgbe->hw; 1095 struct ixgbe_osdep *osdep = &ixgbe->osdep; 1096 off_t mem_size; 1097 1098 /* 1099 * First get the size of device registers to be mapped. 1100 */ 1101 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size) 1102 != DDI_SUCCESS) { 1103 return (IXGBE_FAILURE); 1104 } 1105 1106 /* 1107 * Call ddi_regs_map_setup() to map registers 1108 */ 1109 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET, 1110 (caddr_t *)&hw->hw_addr, 0, 1111 mem_size, &ixgbe_regs_acc_attr, 1112 &osdep->reg_handle)) != DDI_SUCCESS) { 1113 return (IXGBE_FAILURE); 1114 } 1115 1116 return (IXGBE_SUCCESS); 1117 } 1118 1119 /* 1120 * ixgbe_init_properties - Initialize driver properties. 1121 */ 1122 static void 1123 ixgbe_init_properties(ixgbe_t *ixgbe) 1124 { 1125 /* 1126 * Get conf file properties, including link settings 1127 * jumbo frames, ring number, descriptor number, etc. 1128 */ 1129 ixgbe_get_conf(ixgbe); 1130 } 1131 1132 /* 1133 * ixgbe_init_driver_settings - Initialize driver settings. 1134 * 1135 * The settings include hardware function pointers, bus information, 1136 * rx/tx rings settings, link state, and any other parameters that 1137 * need to be setup during driver initialization. 1138 */ 1139 static int 1140 ixgbe_init_driver_settings(ixgbe_t *ixgbe) 1141 { 1142 struct ixgbe_hw *hw = &ixgbe->hw; 1143 dev_info_t *devinfo = ixgbe->dip; 1144 ixgbe_rx_ring_t *rx_ring; 1145 ixgbe_rx_group_t *rx_group; 1146 ixgbe_tx_ring_t *tx_ring; 1147 uint32_t rx_size; 1148 uint32_t tx_size; 1149 uint32_t ring_per_group; 1150 int i; 1151 1152 /* 1153 * Initialize chipset specific hardware function pointers 1154 */ 1155 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) { 1156 return (IXGBE_FAILURE); 1157 } 1158 1159 /* 1160 * Get the system page size 1161 */ 1162 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1); 1163 1164 /* 1165 * Set rx buffer size 1166 * 1167 * The IP header alignment room is counted in the calculation. 1168 * The rx buffer size is in unit of 1K that is required by the 1169 * chipset hardware. 1170 */ 1171 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM; 1172 ixgbe->rx_buf_size = ((rx_size >> 10) + 1173 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10; 1174 1175 /* 1176 * Set tx buffer size 1177 */ 1178 tx_size = ixgbe->max_frame_size; 1179 ixgbe->tx_buf_size = ((tx_size >> 10) + 1180 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10; 1181 1182 /* 1183 * Initialize rx/tx rings/groups parameters 1184 */ 1185 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 1186 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1187 rx_ring = &ixgbe->rx_rings[i]; 1188 rx_ring->index = i; 1189 rx_ring->ixgbe = ixgbe; 1190 rx_ring->group_index = i / ring_per_group; 1191 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i); 1192 } 1193 1194 for (i = 0; i < ixgbe->num_rx_groups; i++) { 1195 rx_group = &ixgbe->rx_groups[i]; 1196 rx_group->index = i; 1197 rx_group->ixgbe = ixgbe; 1198 list_create(&rx_group->vlans, sizeof (ixgbe_vlan_t), 1199 offsetof(ixgbe_vlan_t, ixvl_link)); 1200 } 1201 1202 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1203 tx_ring = &ixgbe->tx_rings[i]; 1204 tx_ring->index = i; 1205 tx_ring->ixgbe = ixgbe; 1206 if (ixgbe->tx_head_wb_enable) 1207 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb; 1208 else 1209 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy; 1210 1211 tx_ring->ring_size = ixgbe->tx_ring_size; 1212 tx_ring->free_list_size = ixgbe->tx_ring_size + 1213 (ixgbe->tx_ring_size >> 1); 1214 } 1215 1216 /* 1217 * Initialize values of interrupt throttling rate 1218 */ 1219 for (i = 1; i < MAX_INTR_VECTOR; i++) 1220 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0]; 1221 1222 /* 1223 * The initial link state should be "unknown" 1224 */ 1225 ixgbe->link_state = LINK_STATE_UNKNOWN; 1226 1227 return (IXGBE_SUCCESS); 1228 } 1229 1230 /* 1231 * ixgbe_init_locks - Initialize locks. 1232 */ 1233 static void 1234 ixgbe_init_locks(ixgbe_t *ixgbe) 1235 { 1236 ixgbe_rx_ring_t *rx_ring; 1237 ixgbe_tx_ring_t *tx_ring; 1238 int i; 1239 1240 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1241 rx_ring = &ixgbe->rx_rings[i]; 1242 mutex_init(&rx_ring->rx_lock, NULL, 1243 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1244 } 1245 1246 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1247 tx_ring = &ixgbe->tx_rings[i]; 1248 mutex_init(&tx_ring->tx_lock, NULL, 1249 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1250 mutex_init(&tx_ring->recycle_lock, NULL, 1251 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1252 mutex_init(&tx_ring->tcb_head_lock, NULL, 1253 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1254 mutex_init(&tx_ring->tcb_tail_lock, NULL, 1255 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1256 } 1257 1258 mutex_init(&ixgbe->gen_lock, NULL, 1259 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1260 1261 mutex_init(&ixgbe->watchdog_lock, NULL, 1262 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1263 } 1264 1265 /* 1266 * ixgbe_destroy_locks - Destroy locks. 1267 */ 1268 static void 1269 ixgbe_destroy_locks(ixgbe_t *ixgbe) 1270 { 1271 ixgbe_rx_ring_t *rx_ring; 1272 ixgbe_tx_ring_t *tx_ring; 1273 int i; 1274 1275 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1276 rx_ring = &ixgbe->rx_rings[i]; 1277 mutex_destroy(&rx_ring->rx_lock); 1278 } 1279 1280 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1281 tx_ring = &ixgbe->tx_rings[i]; 1282 mutex_destroy(&tx_ring->tx_lock); 1283 mutex_destroy(&tx_ring->recycle_lock); 1284 mutex_destroy(&tx_ring->tcb_head_lock); 1285 mutex_destroy(&tx_ring->tcb_tail_lock); 1286 } 1287 1288 mutex_destroy(&ixgbe->gen_lock); 1289 mutex_destroy(&ixgbe->watchdog_lock); 1290 } 1291 1292 /* 1293 * We need to try and determine which LED index in hardware corresponds to the 1294 * link/activity LED. This is the one that'll be overwritten when we perform 1295 * GLDv3 LED activity. 1296 */ 1297 static void 1298 ixgbe_led_init(ixgbe_t *ixgbe) 1299 { 1300 uint32_t reg, i; 1301 struct ixgbe_hw *hw = &ixgbe->hw; 1302 1303 reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1304 for (i = 0; i < 4; i++) { 1305 if (((reg >> IXGBE_LED_MODE_SHIFT(i)) & 1306 IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) { 1307 ixgbe->ixgbe_led_index = i; 1308 return; 1309 } 1310 } 1311 1312 /* 1313 * If we couldn't determine this, we use the default for various MACs 1314 * based on information Intel has inserted into other drivers over the 1315 * years. 1316 */ 1317 switch (hw->mac.type) { 1318 case ixgbe_mac_X550EM_a: 1319 ixgbe->ixgbe_led_index = 0; 1320 break; 1321 case ixgbe_mac_X550EM_x: 1322 ixgbe->ixgbe_led_index = 1; 1323 break; 1324 default: 1325 ixgbe->ixgbe_led_index = 2; 1326 break; 1327 } 1328 } 1329 1330 static int 1331 ixgbe_resume(dev_info_t *devinfo) 1332 { 1333 ixgbe_t *ixgbe; 1334 int i; 1335 1336 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 1337 if (ixgbe == NULL) 1338 return (DDI_FAILURE); 1339 1340 mutex_enter(&ixgbe->gen_lock); 1341 1342 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 1343 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1344 mutex_exit(&ixgbe->gen_lock); 1345 return (DDI_FAILURE); 1346 } 1347 1348 /* 1349 * Enable and start the watchdog timer 1350 */ 1351 ixgbe_enable_watchdog_timer(ixgbe); 1352 } 1353 1354 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED); 1355 1356 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 1357 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1358 mac_tx_ring_update(ixgbe->mac_hdl, 1359 ixgbe->tx_rings[i].ring_handle); 1360 } 1361 } 1362 1363 mutex_exit(&ixgbe->gen_lock); 1364 1365 return (DDI_SUCCESS); 1366 } 1367 1368 static int 1369 ixgbe_suspend(dev_info_t *devinfo) 1370 { 1371 ixgbe_t *ixgbe; 1372 1373 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 1374 if (ixgbe == NULL) 1375 return (DDI_FAILURE); 1376 1377 mutex_enter(&ixgbe->gen_lock); 1378 1379 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED); 1380 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) { 1381 mutex_exit(&ixgbe->gen_lock); 1382 return (DDI_SUCCESS); 1383 } 1384 ixgbe_stop(ixgbe, B_FALSE); 1385 1386 mutex_exit(&ixgbe->gen_lock); 1387 1388 /* 1389 * Disable and stop the watchdog timer 1390 */ 1391 ixgbe_disable_watchdog_timer(ixgbe); 1392 1393 return (DDI_SUCCESS); 1394 } 1395 1396 /* 1397 * ixgbe_init - Initialize the device. 1398 */ 1399 static int 1400 ixgbe_init(ixgbe_t *ixgbe) 1401 { 1402 struct ixgbe_hw *hw = &ixgbe->hw; 1403 u8 pbanum[IXGBE_PBANUM_LENGTH]; 1404 int rv; 1405 1406 mutex_enter(&ixgbe->gen_lock); 1407 1408 /* 1409 * Configure/Initialize hardware 1410 */ 1411 rv = ixgbe_init_hw(hw); 1412 if (rv != IXGBE_SUCCESS) { 1413 switch (rv) { 1414 1415 /* 1416 * The first three errors are not prohibitive to us progressing 1417 * further, and are maily advisory in nature. In the case of a 1418 * SFP module not being present or not deemed supported by the 1419 * common code, we adivse the operator of this fact but carry on 1420 * instead of failing hard, as SFPs can be inserted or replaced 1421 * while the driver is running. In the case of a unknown error, 1422 * we fail-hard, logging the reason and emitting a FMA event. 1423 */ 1424 case IXGBE_ERR_EEPROM_VERSION: 1425 ixgbe_error(ixgbe, 1426 "This Intel 10Gb Ethernet device is pre-release and" 1427 " contains outdated firmware. Please contact your" 1428 " hardware vendor for a replacement."); 1429 break; 1430 case IXGBE_ERR_SFP_NOT_PRESENT: 1431 ixgbe_error(ixgbe, 1432 "No SFP+ module detected on this interface. Please " 1433 "install a supported SFP+ module for this " 1434 "interface to become operational."); 1435 break; 1436 case IXGBE_ERR_SFP_NOT_SUPPORTED: 1437 ixgbe_error(ixgbe, 1438 "Unsupported SFP+ module detected. Please replace " 1439 "it with a supported SFP+ module per Intel " 1440 "documentation, or bypass this check with " 1441 "allow_unsupported_sfp=1 in ixgbe.conf."); 1442 break; 1443 default: 1444 ixgbe_error(ixgbe, 1445 "Failed to initialize hardware. ixgbe_init_hw " 1446 "returned %d", rv); 1447 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1448 goto init_fail; 1449 } 1450 } 1451 1452 /* 1453 * Need to init eeprom before validating the checksum. 1454 */ 1455 if (ixgbe_init_eeprom_params(hw) < 0) { 1456 ixgbe_error(ixgbe, 1457 "Unable to intitialize the eeprom interface."); 1458 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1459 goto init_fail; 1460 } 1461 1462 /* 1463 * NVM validation 1464 */ 1465 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 1466 /* 1467 * Some PCI-E parts fail the first check due to 1468 * the link being in sleep state. Call it again, 1469 * if it fails a second time it's a real issue. 1470 */ 1471 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 1472 ixgbe_error(ixgbe, 1473 "Invalid NVM checksum. Please contact " 1474 "the vendor to update the NVM."); 1475 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1476 goto init_fail; 1477 } 1478 } 1479 1480 /* 1481 * Setup default flow control thresholds - enable/disable 1482 * & flow control type is controlled by ixgbe.conf 1483 */ 1484 hw->fc.high_water[0] = DEFAULT_FCRTH; 1485 hw->fc.low_water[0] = DEFAULT_FCRTL; 1486 hw->fc.pause_time = DEFAULT_FCPAUSE; 1487 hw->fc.send_xon = true; 1488 1489 /* 1490 * Initialize flow control 1491 */ 1492 (void) ixgbe_start_hw(hw); 1493 1494 /* 1495 * Initialize link settings 1496 */ 1497 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE); 1498 1499 /* 1500 * Initialize the chipset hardware 1501 */ 1502 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { 1503 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1504 goto init_fail; 1505 } 1506 1507 /* 1508 * Read identifying information and place in devinfo. 1509 */ 1510 pbanum[0] = '\0'; 1511 (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum)); 1512 if (*pbanum != '\0') { 1513 (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip, 1514 "printed-board-assembly", (char *)pbanum); 1515 } 1516 1517 /* 1518 * Determine LED index. 1519 */ 1520 ixgbe_led_init(ixgbe); 1521 1522 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1523 goto init_fail; 1524 } 1525 1526 mutex_exit(&ixgbe->gen_lock); 1527 return (IXGBE_SUCCESS); 1528 1529 init_fail: 1530 /* 1531 * Reset PHY 1532 */ 1533 (void) ixgbe_reset_phy(hw); 1534 1535 mutex_exit(&ixgbe->gen_lock); 1536 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1537 return (IXGBE_FAILURE); 1538 } 1539 1540 /* 1541 * ixgbe_chip_start - Initialize and start the chipset hardware. 1542 */ 1543 static int 1544 ixgbe_chip_start(ixgbe_t *ixgbe) 1545 { 1546 struct ixgbe_hw *hw = &ixgbe->hw; 1547 int i; 1548 1549 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1550 1551 /* 1552 * Get the mac address 1553 * This function should handle SPARC case correctly. 1554 */ 1555 if (!ixgbe_find_mac_address(ixgbe)) { 1556 ixgbe_error(ixgbe, "Failed to get the mac address"); 1557 return (IXGBE_FAILURE); 1558 } 1559 1560 /* 1561 * Validate the mac address 1562 */ 1563 (void) ixgbe_init_rx_addrs(hw); 1564 if (!is_valid_mac_addr(hw->mac.addr)) { 1565 ixgbe_error(ixgbe, "Invalid mac address"); 1566 return (IXGBE_FAILURE); 1567 } 1568 1569 /* 1570 * Re-enable relaxed ordering for performance. It is disabled 1571 * by default in the hardware init. 1572 */ 1573 if (ixgbe->relax_order_enable == B_TRUE) 1574 ixgbe_enable_relaxed_ordering(hw); 1575 1576 /* 1577 * Setup adapter interrupt vectors 1578 */ 1579 ixgbe_setup_adapter_vector(ixgbe); 1580 1581 /* 1582 * Initialize unicast addresses. 1583 */ 1584 ixgbe_init_unicst(ixgbe); 1585 1586 /* 1587 * Setup and initialize the mctable structures. 1588 */ 1589 ixgbe_setup_multicst(ixgbe); 1590 1591 /* 1592 * Set interrupt throttling rate 1593 */ 1594 for (i = 0; i < ixgbe->intr_cnt; i++) { 1595 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]); 1596 } 1597 1598 /* 1599 * Disable Wake-on-LAN 1600 */ 1601 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 1602 1603 /* 1604 * Some adapters offer Energy Efficient Ethernet (EEE) support. 1605 * Due to issues with EEE in e1000g/igb, we disable this by default 1606 * as a precautionary measure. 1607 * 1608 * Currently, this is present on a number of the X550 family parts. 1609 */ 1610 (void) ixgbe_setup_eee(hw, B_FALSE); 1611 1612 /* 1613 * Turn on any present SFP Tx laser 1614 */ 1615 ixgbe_enable_tx_laser(hw); 1616 1617 /* 1618 * Power on the PHY 1619 */ 1620 (void) ixgbe_set_phy_power(hw, B_TRUE); 1621 1622 /* 1623 * Save the state of the PHY 1624 */ 1625 ixgbe_get_hw_state(ixgbe); 1626 1627 /* 1628 * Make sure driver has control 1629 */ 1630 ixgbe_get_driver_control(hw); 1631 1632 return (IXGBE_SUCCESS); 1633 } 1634 1635 /* 1636 * ixgbe_chip_stop - Stop the chipset hardware 1637 */ 1638 static void 1639 ixgbe_chip_stop(ixgbe_t *ixgbe) 1640 { 1641 struct ixgbe_hw *hw = &ixgbe->hw; 1642 int rv; 1643 1644 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1645 1646 /* 1647 * Stop interupt generation and disable Tx unit 1648 */ 1649 hw->adapter_stopped = false; 1650 (void) ixgbe_stop_adapter(hw); 1651 1652 /* 1653 * Reset the chipset 1654 */ 1655 (void) ixgbe_reset_hw(hw); 1656 1657 /* 1658 * Reset PHY 1659 */ 1660 (void) ixgbe_reset_phy(hw); 1661 1662 /* 1663 * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting 1664 * the PHY while doing so. Else, just power down the PHY. 1665 */ 1666 if (hw->phy.ops.enter_lplu != NULL) { 1667 hw->phy.reset_disable = true; 1668 rv = hw->phy.ops.enter_lplu(hw); 1669 if (rv != IXGBE_SUCCESS) 1670 ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv); 1671 hw->phy.reset_disable = false; 1672 } else { 1673 (void) ixgbe_set_phy_power(hw, false); 1674 } 1675 1676 /* 1677 * Turn off any present SFP Tx laser 1678 * Expected for health and safety reasons 1679 */ 1680 ixgbe_disable_tx_laser(hw); 1681 1682 /* 1683 * Tell firmware driver is no longer in control 1684 */ 1685 ixgbe_release_driver_control(hw); 1686 1687 } 1688 1689 /* 1690 * ixgbe_reset - Reset the chipset and re-start the driver. 1691 * 1692 * It involves stopping and re-starting the chipset, 1693 * and re-configuring the rx/tx rings. 1694 */ 1695 static int 1696 ixgbe_reset(ixgbe_t *ixgbe) 1697 { 1698 int i; 1699 1700 /* 1701 * Disable and stop the watchdog timer 1702 */ 1703 ixgbe_disable_watchdog_timer(ixgbe); 1704 1705 mutex_enter(&ixgbe->gen_lock); 1706 1707 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED); 1708 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED); 1709 1710 ixgbe_stop(ixgbe, B_FALSE); 1711 1712 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1713 mutex_exit(&ixgbe->gen_lock); 1714 return (IXGBE_FAILURE); 1715 } 1716 1717 /* 1718 * After resetting, need to recheck the link status. 1719 */ 1720 ixgbe->link_check_complete = B_FALSE; 1721 ixgbe->link_check_hrtime = gethrtime() + 1722 (IXGBE_LINK_UP_TIME * 100000000ULL); 1723 1724 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED); 1725 1726 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) { 1727 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1728 mac_tx_ring_update(ixgbe->mac_hdl, 1729 ixgbe->tx_rings[i].ring_handle); 1730 } 1731 } 1732 1733 mutex_exit(&ixgbe->gen_lock); 1734 1735 /* 1736 * Enable and start the watchdog timer 1737 */ 1738 ixgbe_enable_watchdog_timer(ixgbe); 1739 1740 return (IXGBE_SUCCESS); 1741 } 1742 1743 /* 1744 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources. 1745 */ 1746 static void 1747 ixgbe_tx_clean(ixgbe_t *ixgbe) 1748 { 1749 ixgbe_tx_ring_t *tx_ring; 1750 tx_control_block_t *tcb; 1751 link_list_t pending_list; 1752 uint32_t desc_num; 1753 int i, j; 1754 1755 LINK_LIST_INIT(&pending_list); 1756 1757 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1758 tx_ring = &ixgbe->tx_rings[i]; 1759 1760 mutex_enter(&tx_ring->recycle_lock); 1761 1762 /* 1763 * Clean the pending tx data - the pending packets in the 1764 * work_list that have no chances to be transmitted again. 1765 * 1766 * We must ensure the chipset is stopped or the link is down 1767 * before cleaning the transmit packets. 1768 */ 1769 desc_num = 0; 1770 for (j = 0; j < tx_ring->ring_size; j++) { 1771 tcb = tx_ring->work_list[j]; 1772 if (tcb != NULL) { 1773 desc_num += tcb->desc_num; 1774 1775 tx_ring->work_list[j] = NULL; 1776 1777 ixgbe_free_tcb(tcb); 1778 1779 LIST_PUSH_TAIL(&pending_list, &tcb->link); 1780 } 1781 } 1782 1783 if (desc_num > 0) { 1784 atomic_add_32(&tx_ring->tbd_free, desc_num); 1785 ASSERT(tx_ring->tbd_free == tx_ring->ring_size); 1786 1787 /* 1788 * Reset the head and tail pointers of the tbd ring; 1789 * Reset the writeback head if it's enable. 1790 */ 1791 tx_ring->tbd_head = 0; 1792 tx_ring->tbd_tail = 0; 1793 if (ixgbe->tx_head_wb_enable) 1794 *tx_ring->tbd_head_wb = 0; 1795 1796 IXGBE_WRITE_REG(&ixgbe->hw, 1797 IXGBE_TDH(tx_ring->index), 0); 1798 IXGBE_WRITE_REG(&ixgbe->hw, 1799 IXGBE_TDT(tx_ring->index), 0); 1800 } 1801 1802 mutex_exit(&tx_ring->recycle_lock); 1803 1804 /* 1805 * Add the tx control blocks in the pending list to 1806 * the free list. 1807 */ 1808 ixgbe_put_free_list(tx_ring, &pending_list); 1809 } 1810 } 1811 1812 /* 1813 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be 1814 * transmitted. 1815 */ 1816 static boolean_t 1817 ixgbe_tx_drain(ixgbe_t *ixgbe) 1818 { 1819 ixgbe_tx_ring_t *tx_ring; 1820 boolean_t done; 1821 int i, j; 1822 1823 /* 1824 * Wait for a specific time to allow pending tx packets 1825 * to be transmitted. 1826 * 1827 * Check the counter tbd_free to see if transmission is done. 1828 * No lock protection is needed here. 1829 * 1830 * Return B_TRUE if all pending packets have been transmitted; 1831 * Otherwise return B_FALSE; 1832 */ 1833 for (i = 0; i < TX_DRAIN_TIME; i++) { 1834 1835 done = B_TRUE; 1836 for (j = 0; j < ixgbe->num_tx_rings; j++) { 1837 tx_ring = &ixgbe->tx_rings[j]; 1838 done = done && 1839 (tx_ring->tbd_free == tx_ring->ring_size); 1840 } 1841 1842 if (done) 1843 break; 1844 1845 msec_delay(1); 1846 } 1847 1848 return (done); 1849 } 1850 1851 /* 1852 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer. 1853 */ 1854 static boolean_t 1855 ixgbe_rx_drain(ixgbe_t *ixgbe) 1856 { 1857 boolean_t done = B_TRUE; 1858 int i; 1859 1860 /* 1861 * Polling the rx free list to check if those rx buffers held by 1862 * the upper layer are released. 1863 * 1864 * Check the counter rcb_free to see if all pending buffers are 1865 * released. No lock protection is needed here. 1866 * 1867 * Return B_TRUE if all pending buffers have been released; 1868 * Otherwise return B_FALSE; 1869 */ 1870 for (i = 0; i < RX_DRAIN_TIME; i++) { 1871 done = (ixgbe->rcb_pending == 0); 1872 1873 if (done) 1874 break; 1875 1876 msec_delay(1); 1877 } 1878 1879 return (done); 1880 } 1881 1882 /* 1883 * ixgbe_start - Start the driver/chipset. 1884 */ 1885 int 1886 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer) 1887 { 1888 struct ixgbe_hw *hw = &ixgbe->hw; 1889 int i; 1890 1891 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1892 1893 if (alloc_buffer) { 1894 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) { 1895 ixgbe_error(ixgbe, 1896 "Failed to allocate software receive rings"); 1897 return (IXGBE_FAILURE); 1898 } 1899 1900 /* Allocate buffers for all the rx/tx rings */ 1901 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) { 1902 ixgbe_error(ixgbe, "Failed to allocate DMA resource"); 1903 return (IXGBE_FAILURE); 1904 } 1905 1906 ixgbe->tx_ring_init = B_TRUE; 1907 } else { 1908 ixgbe->tx_ring_init = B_FALSE; 1909 } 1910 1911 for (i = 0; i < ixgbe->num_rx_rings; i++) 1912 mutex_enter(&ixgbe->rx_rings[i].rx_lock); 1913 for (i = 0; i < ixgbe->num_tx_rings; i++) 1914 mutex_enter(&ixgbe->tx_rings[i].tx_lock); 1915 1916 /* 1917 * Start the chipset hardware 1918 */ 1919 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { 1920 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1921 goto start_failure; 1922 } 1923 1924 /* 1925 * Configure link now for X550 1926 * 1927 * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the 1928 * resting state of the adapter at a 1Gb FDX speed. Prior to the X550, 1929 * the resting state of the link would be the maximum speed that 1930 * autonegotiation will allow (usually 10Gb, infrastructure allowing) 1931 * so we never bothered with explicitly setting the link to 10Gb as it 1932 * would already be at that state on driver attach. With X550, we must 1933 * trigger a re-negotiation of the link in order to switch from a LPLU 1934 * 1Gb link to 10Gb (cable and link partner permitting.) 1935 */ 1936 if (hw->mac.type == ixgbe_mac_X550 || 1937 hw->mac.type == ixgbe_mac_X550EM_a || 1938 hw->mac.type == ixgbe_mac_X550EM_x) { 1939 (void) ixgbe_driver_setup_link(ixgbe, B_TRUE); 1940 ixgbe_get_hw_state(ixgbe); 1941 } 1942 1943 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1944 goto start_failure; 1945 } 1946 1947 /* 1948 * Setup the rx/tx rings 1949 */ 1950 if (ixgbe_setup_rings(ixgbe) != IXGBE_SUCCESS) 1951 goto start_failure; 1952 1953 /* 1954 * ixgbe_start() will be called when resetting, however if reset 1955 * happens, we need to clear the ERROR, STALL and OVERTEMP flags 1956 * before enabling the interrupts. 1957 */ 1958 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR 1959 | IXGBE_STALL| IXGBE_OVERTEMP)); 1960 1961 /* 1962 * Enable adapter interrupts 1963 * The interrupts must be enabled after the driver state is START 1964 */ 1965 ixgbe_enable_adapter_interrupts(ixgbe); 1966 1967 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 1968 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 1969 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 1970 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 1971 1972 return (IXGBE_SUCCESS); 1973 1974 start_failure: 1975 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 1976 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 1977 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 1978 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 1979 1980 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1981 1982 return (IXGBE_FAILURE); 1983 } 1984 1985 /* 1986 * ixgbe_stop - Stop the driver/chipset. 1987 */ 1988 void 1989 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer) 1990 { 1991 int i; 1992 1993 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1994 1995 /* 1996 * Disable the adapter interrupts 1997 */ 1998 ixgbe_disable_adapter_interrupts(ixgbe); 1999 2000 /* 2001 * Drain the pending tx packets 2002 */ 2003 (void) ixgbe_tx_drain(ixgbe); 2004 2005 for (i = 0; i < ixgbe->num_rx_rings; i++) 2006 mutex_enter(&ixgbe->rx_rings[i].rx_lock); 2007 for (i = 0; i < ixgbe->num_tx_rings; i++) 2008 mutex_enter(&ixgbe->tx_rings[i].tx_lock); 2009 2010 /* 2011 * Stop the chipset hardware 2012 */ 2013 ixgbe_chip_stop(ixgbe); 2014 2015 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2016 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 2017 } 2018 2019 /* 2020 * Clean the pending tx data/resources 2021 */ 2022 ixgbe_tx_clean(ixgbe); 2023 2024 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 2025 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 2026 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 2027 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 2028 2029 if (ixgbe->link_state == LINK_STATE_UP) { 2030 ixgbe->link_state = LINK_STATE_UNKNOWN; 2031 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 2032 } 2033 2034 if (free_buffer) { 2035 /* 2036 * Release the DMA/memory resources of rx/tx rings 2037 */ 2038 ixgbe_free_dma(ixgbe); 2039 ixgbe_free_rx_data(ixgbe); 2040 } 2041 } 2042 2043 /* 2044 * ixgbe_cbfunc - Driver interface for generic DDI callbacks 2045 */ 2046 /* ARGSUSED */ 2047 static int 2048 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg, 2049 void *arg1, void *arg2) 2050 { 2051 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 2052 2053 switch (cbaction) { 2054 /* IRM callback */ 2055 int count; 2056 case DDI_CB_INTR_ADD: 2057 case DDI_CB_INTR_REMOVE: 2058 count = (int)(uintptr_t)cbarg; 2059 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX); 2060 DTRACE_PROBE2(ixgbe__irm__callback, int, count, 2061 int, ixgbe->intr_cnt); 2062 if (ixgbe_intr_adjust(ixgbe, cbaction, count) != 2063 DDI_SUCCESS) { 2064 ixgbe_error(ixgbe, 2065 "IRM CB: Failed to adjust interrupts"); 2066 goto cb_fail; 2067 } 2068 break; 2069 default: 2070 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported", 2071 cbaction); 2072 return (DDI_ENOTSUP); 2073 } 2074 return (DDI_SUCCESS); 2075 cb_fail: 2076 return (DDI_FAILURE); 2077 } 2078 2079 /* 2080 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request. 2081 */ 2082 static int 2083 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count) 2084 { 2085 int i, rc, actual; 2086 2087 if (count == 0) 2088 return (DDI_SUCCESS); 2089 2090 if ((cbaction == DDI_CB_INTR_ADD && 2091 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) || 2092 (cbaction == DDI_CB_INTR_REMOVE && 2093 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min)) 2094 return (DDI_FAILURE); 2095 2096 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) { 2097 return (DDI_FAILURE); 2098 } 2099 2100 for (i = 0; i < ixgbe->num_rx_rings; i++) 2101 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL); 2102 for (i = 0; i < ixgbe->num_tx_rings; i++) 2103 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL); 2104 2105 mutex_enter(&ixgbe->gen_lock); 2106 ixgbe->ixgbe_state &= ~IXGBE_STARTED; 2107 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST; 2108 ixgbe->ixgbe_state |= IXGBE_SUSPENDED; 2109 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); 2110 2111 ixgbe_stop(ixgbe, B_FALSE); 2112 /* 2113 * Disable interrupts 2114 */ 2115 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 2116 rc = ixgbe_disable_intrs(ixgbe); 2117 ASSERT(rc == IXGBE_SUCCESS); 2118 } 2119 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR; 2120 2121 /* 2122 * Remove interrupt handlers 2123 */ 2124 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 2125 ixgbe_rem_intr_handlers(ixgbe); 2126 } 2127 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR; 2128 2129 /* 2130 * Clear vect_map 2131 */ 2132 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 2133 switch (cbaction) { 2134 case DDI_CB_INTR_ADD: 2135 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable, 2136 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual, 2137 DDI_INTR_ALLOC_NORMAL); 2138 if (rc != DDI_SUCCESS || actual != count) { 2139 ixgbe_log(ixgbe, "Adjust interrupts failed." 2140 "return: %d, irm cb size: %d, actual: %d", 2141 rc, count, actual); 2142 goto intr_adjust_fail; 2143 } 2144 ixgbe->intr_cnt += count; 2145 break; 2146 2147 case DDI_CB_INTR_REMOVE: 2148 for (i = ixgbe->intr_cnt - count; 2149 i < ixgbe->intr_cnt; i ++) { 2150 rc = ddi_intr_free(ixgbe->htable[i]); 2151 ixgbe->htable[i] = NULL; 2152 if (rc != DDI_SUCCESS) { 2153 ixgbe_log(ixgbe, "Adjust interrupts failed." 2154 "return: %d, irm cb size: %d, actual: %d", 2155 rc, count, actual); 2156 goto intr_adjust_fail; 2157 } 2158 } 2159 ixgbe->intr_cnt -= count; 2160 break; 2161 } 2162 2163 /* 2164 * Get priority for first vector, assume remaining are all the same 2165 */ 2166 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 2167 if (rc != DDI_SUCCESS) { 2168 ixgbe_log(ixgbe, 2169 "Get interrupt priority failed: %d", rc); 2170 goto intr_adjust_fail; 2171 } 2172 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 2173 if (rc != DDI_SUCCESS) { 2174 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc); 2175 goto intr_adjust_fail; 2176 } 2177 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 2178 2179 /* 2180 * Map rings to interrupt vectors 2181 */ 2182 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 2183 ixgbe_error(ixgbe, 2184 "IRM CB: Failed to map interrupts to vectors"); 2185 goto intr_adjust_fail; 2186 } 2187 2188 /* 2189 * Add interrupt handlers 2190 */ 2191 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 2192 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers"); 2193 goto intr_adjust_fail; 2194 } 2195 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 2196 2197 /* 2198 * Now that mutex locks are initialized, and the chip is also 2199 * initialized, enable interrupts. 2200 */ 2201 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 2202 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts"); 2203 goto intr_adjust_fail; 2204 } 2205 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 2206 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 2207 ixgbe_error(ixgbe, "IRM CB: Failed to start"); 2208 goto intr_adjust_fail; 2209 } 2210 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST; 2211 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; 2212 ixgbe->ixgbe_state |= IXGBE_STARTED; 2213 mutex_exit(&ixgbe->gen_lock); 2214 2215 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2216 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, 2217 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]); 2218 } 2219 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2220 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, 2221 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]); 2222 } 2223 2224 /* Wakeup all Tx rings */ 2225 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2226 mac_tx_ring_update(ixgbe->mac_hdl, 2227 ixgbe->tx_rings[i].ring_handle); 2228 } 2229 2230 IXGBE_DEBUGLOG_3(ixgbe, 2231 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).", 2232 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max); 2233 return (DDI_SUCCESS); 2234 2235 intr_adjust_fail: 2236 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 2237 mutex_exit(&ixgbe->gen_lock); 2238 return (DDI_FAILURE); 2239 } 2240 2241 /* 2242 * ixgbe_intr_cb_register - Register interrupt callback function. 2243 */ 2244 static int 2245 ixgbe_intr_cb_register(ixgbe_t *ixgbe) 2246 { 2247 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc, 2248 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) { 2249 return (IXGBE_FAILURE); 2250 } 2251 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered."); 2252 return (IXGBE_SUCCESS); 2253 } 2254 2255 /* 2256 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings. 2257 */ 2258 static int 2259 ixgbe_alloc_rings(ixgbe_t *ixgbe) 2260 { 2261 /* 2262 * Allocate memory space for rx rings 2263 */ 2264 ixgbe->rx_rings = kmem_zalloc( 2265 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, 2266 KM_NOSLEEP); 2267 2268 if (ixgbe->rx_rings == NULL) { 2269 return (IXGBE_FAILURE); 2270 } 2271 2272 /* 2273 * Allocate memory space for tx rings 2274 */ 2275 ixgbe->tx_rings = kmem_zalloc( 2276 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, 2277 KM_NOSLEEP); 2278 2279 if (ixgbe->tx_rings == NULL) { 2280 kmem_free(ixgbe->rx_rings, 2281 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2282 ixgbe->rx_rings = NULL; 2283 return (IXGBE_FAILURE); 2284 } 2285 2286 /* 2287 * Allocate memory space for rx ring groups 2288 */ 2289 ixgbe->rx_groups = kmem_zalloc( 2290 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, 2291 KM_NOSLEEP); 2292 2293 if (ixgbe->rx_groups == NULL) { 2294 kmem_free(ixgbe->rx_rings, 2295 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2296 kmem_free(ixgbe->tx_rings, 2297 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2298 ixgbe->rx_rings = NULL; 2299 ixgbe->tx_rings = NULL; 2300 return (IXGBE_FAILURE); 2301 } 2302 2303 return (IXGBE_SUCCESS); 2304 } 2305 2306 /* 2307 * ixgbe_free_rings - Free the memory space of rx/tx rings. 2308 */ 2309 static void 2310 ixgbe_free_rings(ixgbe_t *ixgbe) 2311 { 2312 if (ixgbe->rx_rings != NULL) { 2313 kmem_free(ixgbe->rx_rings, 2314 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2315 ixgbe->rx_rings = NULL; 2316 } 2317 2318 if (ixgbe->tx_rings != NULL) { 2319 kmem_free(ixgbe->tx_rings, 2320 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2321 ixgbe->tx_rings = NULL; 2322 } 2323 2324 for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) { 2325 ixgbe_vlan_t *vlp; 2326 ixgbe_rx_group_t *rx_group = &ixgbe->rx_groups[i]; 2327 2328 while ((vlp = list_remove_head(&rx_group->vlans)) != NULL) 2329 kmem_free(vlp, sizeof (ixgbe_vlan_t)); 2330 2331 list_destroy(&rx_group->vlans); 2332 } 2333 2334 if (ixgbe->rx_groups != NULL) { 2335 kmem_free(ixgbe->rx_groups, 2336 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); 2337 ixgbe->rx_groups = NULL; 2338 } 2339 } 2340 2341 static int 2342 ixgbe_alloc_rx_data(ixgbe_t *ixgbe) 2343 { 2344 ixgbe_rx_ring_t *rx_ring; 2345 int i; 2346 2347 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2348 rx_ring = &ixgbe->rx_rings[i]; 2349 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) 2350 goto alloc_rx_rings_failure; 2351 } 2352 return (IXGBE_SUCCESS); 2353 2354 alloc_rx_rings_failure: 2355 ixgbe_free_rx_data(ixgbe); 2356 return (IXGBE_FAILURE); 2357 } 2358 2359 static void 2360 ixgbe_free_rx_data(ixgbe_t *ixgbe) 2361 { 2362 ixgbe_rx_ring_t *rx_ring; 2363 ixgbe_rx_data_t *rx_data; 2364 int i; 2365 2366 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2367 rx_ring = &ixgbe->rx_rings[i]; 2368 2369 mutex_enter(&ixgbe->rx_pending_lock); 2370 rx_data = rx_ring->rx_data; 2371 2372 if (rx_data != NULL) { 2373 rx_data->flag |= IXGBE_RX_STOPPED; 2374 2375 if (rx_data->rcb_pending == 0) { 2376 ixgbe_free_rx_ring_data(rx_data); 2377 rx_ring->rx_data = NULL; 2378 } 2379 } 2380 2381 mutex_exit(&ixgbe->rx_pending_lock); 2382 } 2383 } 2384 2385 /* 2386 * ixgbe_setup_rings - Setup rx/tx rings. 2387 */ 2388 static int 2389 ixgbe_setup_rings(ixgbe_t *ixgbe) 2390 { 2391 /* 2392 * Setup the rx/tx rings, including the following: 2393 * 2394 * 1. Setup the descriptor ring and the control block buffers; 2395 * 2. Initialize necessary registers for receive/transmit; 2396 * 3. Initialize software pointers/parameters for receive/transmit; 2397 */ 2398 if (ixgbe_setup_rx(ixgbe) != IXGBE_SUCCESS) 2399 return (IXGBE_FAILURE); 2400 2401 ixgbe_setup_tx(ixgbe); 2402 2403 return (IXGBE_SUCCESS); 2404 } 2405 2406 static void 2407 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 2408 { 2409 ixgbe_t *ixgbe = rx_ring->ixgbe; 2410 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2411 struct ixgbe_hw *hw = &ixgbe->hw; 2412 rx_control_block_t *rcb; 2413 union ixgbe_adv_rx_desc *rbd; 2414 uint32_t size; 2415 uint32_t buf_low; 2416 uint32_t buf_high; 2417 uint32_t reg_val; 2418 int i; 2419 2420 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2421 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2422 2423 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2424 rcb = rx_data->work_list[i]; 2425 rbd = &rx_data->rbd_ring[i]; 2426 2427 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2428 rbd->read.hdr_addr = 0; 2429 } 2430 2431 /* 2432 * Initialize the length register 2433 */ 2434 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2435 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2436 2437 /* 2438 * Initialize the base address registers 2439 */ 2440 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2441 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2442 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2443 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2444 2445 /* 2446 * Setup head & tail pointers 2447 */ 2448 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2449 rx_data->ring_size - 1); 2450 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2451 2452 rx_data->rbd_next = 0; 2453 rx_data->lro_first = 0; 2454 2455 /* 2456 * Setup the Receive Descriptor Control Register (RXDCTL) 2457 * PTHRESH=32 descriptors (half the internal cache) 2458 * HTHRESH=0 descriptors (to minimize latency on fetch) 2459 * WTHRESH defaults to 1 (writeback each descriptor) 2460 */ 2461 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2462 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2463 2464 /* Not a valid value for 82599, X540 or X550 */ 2465 if (hw->mac.type == ixgbe_mac_82598EB) { 2466 reg_val |= 0x0020; /* pthresh */ 2467 } 2468 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2469 2470 if (hw->mac.type == ixgbe_mac_82599EB || 2471 hw->mac.type == ixgbe_mac_X540 || 2472 hw->mac.type == ixgbe_mac_X550 || 2473 hw->mac.type == ixgbe_mac_X550EM_x || 2474 hw->mac.type == ixgbe_mac_X550EM_a) { 2475 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2476 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2477 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2478 } 2479 2480 /* 2481 * Setup the Split and Replication Receive Control Register. 2482 * Set the rx buffer size and the advanced descriptor type. 2483 */ 2484 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) | 2485 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2486 reg_val |= IXGBE_SRRCTL_DROP_EN; 2487 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val); 2488 } 2489 2490 static int 2491 ixgbe_setup_rx(ixgbe_t *ixgbe) 2492 { 2493 ixgbe_rx_ring_t *rx_ring; 2494 struct ixgbe_hw *hw = &ixgbe->hw; 2495 uint32_t reg_val; 2496 uint32_t i; 2497 uint32_t psrtype_rss_bit; 2498 2499 /* 2500 * Ensure that Rx is disabled while setting up 2501 * the Rx unit and Rx descriptor ring(s) 2502 */ 2503 ixgbe_disable_rx(hw); 2504 2505 /* PSRTYPE must be configured for 82599 */ 2506 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ && 2507 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) { 2508 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2509 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2510 reg_val |= IXGBE_PSRTYPE_L2HDR; 2511 reg_val |= 0x80000000; 2512 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val); 2513 } else { 2514 if (ixgbe->num_rx_groups > 32) { 2515 psrtype_rss_bit = 0x20000000; 2516 } else { 2517 psrtype_rss_bit = 0x40000000; 2518 } 2519 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) { 2520 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2521 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2522 reg_val |= IXGBE_PSRTYPE_L2HDR; 2523 reg_val |= psrtype_rss_bit; 2524 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val); 2525 } 2526 } 2527 2528 /* 2529 * Set filter control in FCTRL to determine types of packets are passed 2530 * up to the driver. 2531 * - Pass broadcast packets. 2532 * - Do not pass flow control pause frames (82598-specific) 2533 */ 2534 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2535 reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */ 2536 if (hw->mac.type == ixgbe_mac_82598EB) { 2537 reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */ 2538 } 2539 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val); 2540 2541 /* 2542 * Hardware checksum settings 2543 */ 2544 if (ixgbe->rx_hcksum_enable) { 2545 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2546 reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */ 2547 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val); 2548 } 2549 2550 /* 2551 * Setup VMDq and RSS for multiple receive queues 2552 */ 2553 switch (ixgbe->classify_mode) { 2554 case IXGBE_CLASSIFY_RSS: 2555 /* 2556 * One group, only RSS is needed when more than 2557 * one ring enabled. 2558 */ 2559 ixgbe_setup_rss(ixgbe); 2560 break; 2561 2562 case IXGBE_CLASSIFY_VMDQ: 2563 /* 2564 * Multiple groups, each group has one ring, 2565 * only VMDq is needed. 2566 */ 2567 ixgbe_setup_vmdq(ixgbe); 2568 break; 2569 2570 case IXGBE_CLASSIFY_VMDQ_RSS: 2571 /* 2572 * Multiple groups and multiple rings, both 2573 * VMDq and RSS are needed. 2574 */ 2575 ixgbe_setup_vmdq_rss(ixgbe); 2576 break; 2577 2578 default: 2579 break; 2580 } 2581 2582 /* 2583 * Initialize VLAN SW and HW state if VLAN filtering is 2584 * enabled. 2585 */ 2586 if (ixgbe->vlft_enabled) { 2587 if (ixgbe_init_vlan(ixgbe) != IXGBE_SUCCESS) 2588 return (IXGBE_FAILURE); 2589 } 2590 2591 /* 2592 * Enable the receive unit. This must be done after filter 2593 * control is set in FCTRL. On 82598, we disable the descriptor monitor. 2594 * 82598 is the only adapter which defines this RXCTRL option. 2595 */ 2596 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2597 if (hw->mac.type == ixgbe_mac_82598EB) 2598 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */ 2599 reg_val |= IXGBE_RXCTRL_RXEN; 2600 (void) ixgbe_enable_rx_dma(hw, reg_val); 2601 2602 /* 2603 * ixgbe_setup_rx_ring must be called after configuring RXCTRL 2604 */ 2605 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2606 rx_ring = &ixgbe->rx_rings[i]; 2607 ixgbe_setup_rx_ring(rx_ring); 2608 } 2609 2610 /* 2611 * The 82598 controller gives us the RNBC (Receive No Buffer 2612 * Count) register to determine the number of frames dropped 2613 * due to no available descriptors on the destination queue. 2614 * However, this register was removed starting with 82599 and 2615 * it was replaced with the RQSMR/QPRDC registers. The nice 2616 * thing about the new registers is that they allow you to map 2617 * groups of queues to specific stat registers. The bad thing 2618 * is there are only 16 slots in the stat registers, so this 2619 * won't work when we have 32 Rx groups. Instead, we map all 2620 * queues to the zero slot of the stat registers, giving us a 2621 * global counter at QPRDC[0] (with the equivalent semantics 2622 * of RNBC). Perhaps future controllers will have more slots 2623 * and we can implement per-group counters. 2624 */ 2625 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2626 uint32_t index = ixgbe->rx_rings[i].hw_index; 2627 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), 0); 2628 } 2629 2630 /* 2631 * The Max Frame Size in MHADD/MAXFRS will be internally increased 2632 * by four bytes if the packet has a VLAN field, so includes MTU, 2633 * ethernet header and frame check sequence. 2634 * Register is MAXFRS in 82599. 2635 */ 2636 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD); 2637 reg_val &= ~IXGBE_MHADD_MFS_MASK; 2638 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header) 2639 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT; 2640 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val); 2641 2642 /* 2643 * Setup Jumbo Frame enable bit 2644 */ 2645 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2646 if (ixgbe->default_mtu > ETHERMTU) 2647 reg_val |= IXGBE_HLREG0_JUMBOEN; 2648 else 2649 reg_val &= ~IXGBE_HLREG0_JUMBOEN; 2650 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2651 2652 /* 2653 * Setup RSC for multiple receive queues. 2654 */ 2655 if (ixgbe->lro_enable) { 2656 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2657 /* 2658 * Make sure rx_buf_size * MAXDESC not greater 2659 * than 65535. 2660 * Intel recommends 4 for MAXDESC field value. 2661 */ 2662 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i)); 2663 reg_val |= IXGBE_RSCCTL_RSCEN; 2664 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k) 2665 reg_val |= IXGBE_RSCCTL_MAXDESC_1; 2666 else 2667 reg_val |= IXGBE_RSCCTL_MAXDESC_4; 2668 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val); 2669 } 2670 2671 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU); 2672 reg_val |= IXGBE_RSCDBU_RSCACKDIS; 2673 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val); 2674 2675 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2676 reg_val |= IXGBE_RDRXCTL_RSCACKC; 2677 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX; 2678 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2679 2680 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2681 } 2682 2683 return (IXGBE_SUCCESS); 2684 } 2685 2686 static void 2687 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring) 2688 { 2689 ixgbe_t *ixgbe = tx_ring->ixgbe; 2690 struct ixgbe_hw *hw = &ixgbe->hw; 2691 uint32_t size; 2692 uint32_t buf_low; 2693 uint32_t buf_high; 2694 uint32_t reg_val; 2695 2696 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2697 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2698 2699 /* 2700 * Initialize the length register 2701 */ 2702 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc); 2703 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size); 2704 2705 /* 2706 * Initialize the base address registers 2707 */ 2708 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2709 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2710 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low); 2711 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high); 2712 2713 /* 2714 * Setup head & tail pointers 2715 */ 2716 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0); 2717 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0); 2718 2719 /* 2720 * Setup head write-back 2721 */ 2722 if (ixgbe->tx_head_wb_enable) { 2723 /* 2724 * The memory of the head write-back is allocated using 2725 * the extra tbd beyond the tail of the tbd ring. 2726 */ 2727 tx_ring->tbd_head_wb = (uint32_t *) 2728 ((uintptr_t)tx_ring->tbd_area.address + size); 2729 *tx_ring->tbd_head_wb = 0; 2730 2731 buf_low = (uint32_t) 2732 (tx_ring->tbd_area.dma_address + size); 2733 buf_high = (uint32_t) 2734 ((tx_ring->tbd_area.dma_address + size) >> 32); 2735 2736 /* Set the head write-back enable bit */ 2737 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2738 2739 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2740 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2741 2742 /* 2743 * Turn off relaxed ordering for head write back or it will 2744 * cause problems with the tx recycling 2745 */ 2746 2747 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ? 2748 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) : 2749 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index)); 2750 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 2751 if (hw->mac.type == ixgbe_mac_82598EB) { 2752 IXGBE_WRITE_REG(hw, 2753 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val); 2754 } else { 2755 IXGBE_WRITE_REG(hw, 2756 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val); 2757 } 2758 } else { 2759 tx_ring->tbd_head_wb = NULL; 2760 } 2761 2762 tx_ring->tbd_head = 0; 2763 tx_ring->tbd_tail = 0; 2764 tx_ring->tbd_free = tx_ring->ring_size; 2765 2766 if (ixgbe->tx_ring_init == B_TRUE) { 2767 tx_ring->tcb_head = 0; 2768 tx_ring->tcb_tail = 0; 2769 tx_ring->tcb_free = tx_ring->free_list_size; 2770 } 2771 2772 /* 2773 * Initialize the s/w context structure 2774 */ 2775 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t)); 2776 } 2777 2778 static void 2779 ixgbe_setup_tx(ixgbe_t *ixgbe) 2780 { 2781 struct ixgbe_hw *hw = &ixgbe->hw; 2782 ixgbe_tx_ring_t *tx_ring; 2783 uint32_t reg_val; 2784 int i; 2785 2786 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2787 tx_ring = &ixgbe->tx_rings[i]; 2788 ixgbe_setup_tx_ring(tx_ring); 2789 } 2790 2791 /* 2792 * Setup the per-ring statistics mapping. We map all Tx queues 2793 * to slot 0 to stay consistent with Rx. 2794 */ 2795 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2796 switch (hw->mac.type) { 2797 case ixgbe_mac_82598EB: 2798 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 0); 2799 break; 2800 2801 default: 2802 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 0); 2803 break; 2804 } 2805 } 2806 2807 /* 2808 * Enable CRC appending and TX padding (for short tx frames) 2809 */ 2810 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2811 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN; 2812 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2813 2814 /* 2815 * enable DMA for 82599, X540 and X550 parts 2816 */ 2817 if (hw->mac.type == ixgbe_mac_82599EB || 2818 hw->mac.type == ixgbe_mac_X540 || 2819 hw->mac.type == ixgbe_mac_X550 || 2820 hw->mac.type == ixgbe_mac_X550EM_x || 2821 hw->mac.type == ixgbe_mac_X550EM_a) { 2822 /* DMATXCTL.TE must be set after all Tx config is complete */ 2823 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2824 reg_val |= IXGBE_DMATXCTL_TE; 2825 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val); 2826 2827 /* Disable arbiter to set MTQC */ 2828 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2829 reg_val |= IXGBE_RTTDCS_ARBDIS; 2830 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2831 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2832 reg_val &= ~IXGBE_RTTDCS_ARBDIS; 2833 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2834 } 2835 2836 /* 2837 * Enabling tx queues .. 2838 * For 82599 must be done after DMATXCTL.TE is set 2839 */ 2840 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2841 tx_ring = &ixgbe->tx_rings[i]; 2842 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index)); 2843 reg_val |= IXGBE_TXDCTL_ENABLE; 2844 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val); 2845 } 2846 } 2847 2848 /* 2849 * ixgbe_setup_rss - Setup receive-side scaling feature. 2850 */ 2851 static void 2852 ixgbe_setup_rss(ixgbe_t *ixgbe) 2853 { 2854 struct ixgbe_hw *hw = &ixgbe->hw; 2855 uint32_t mrqc; 2856 2857 /* 2858 * Initialize RETA/ERETA table 2859 */ 2860 ixgbe_setup_rss_table(ixgbe); 2861 2862 /* 2863 * Enable RSS & perform hash on these packet types 2864 */ 2865 mrqc = IXGBE_MRQC_RSSEN | 2866 IXGBE_MRQC_RSS_FIELD_IPV4 | 2867 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2868 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2869 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2870 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2871 IXGBE_MRQC_RSS_FIELD_IPV6 | 2872 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2873 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2874 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2875 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2876 } 2877 2878 /* 2879 * ixgbe_setup_vmdq - Setup MAC classification feature 2880 */ 2881 static void 2882 ixgbe_setup_vmdq(ixgbe_t *ixgbe) 2883 { 2884 struct ixgbe_hw *hw = &ixgbe->hw; 2885 uint32_t vmdctl, i, vtctl, vlnctl; 2886 2887 /* 2888 * Setup the VMDq Control register, enable VMDq based on 2889 * packet destination MAC address: 2890 */ 2891 switch (hw->mac.type) { 2892 case ixgbe_mac_82598EB: 2893 /* 2894 * VMDq Enable = 1; 2895 * VMDq Filter = 0; MAC filtering 2896 * Default VMDq output index = 0; 2897 */ 2898 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2899 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2900 break; 2901 2902 case ixgbe_mac_82599EB: 2903 case ixgbe_mac_X540: 2904 case ixgbe_mac_X550: 2905 case ixgbe_mac_X550EM_x: 2906 case ixgbe_mac_X550EM_a: 2907 /* 2908 * Enable VMDq-only. 2909 */ 2910 vmdctl = IXGBE_MRQC_VMDQEN; 2911 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl); 2912 2913 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2914 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2915 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2916 } 2917 2918 /* 2919 * Enable Virtualization and Replication. 2920 */ 2921 vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 2922 ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK; 2923 vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2924 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2925 2926 /* 2927 * Enable VLAN filtering and switching (VFTA and VLVF). 2928 */ 2929 vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2930 vlnctl |= IXGBE_VLNCTRL_VFE; 2931 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl); 2932 ixgbe->vlft_enabled = B_TRUE; 2933 2934 /* 2935 * Enable receiving packets to all VFs 2936 */ 2937 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2938 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2939 break; 2940 2941 default: 2942 break; 2943 } 2944 } 2945 2946 /* 2947 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature. 2948 */ 2949 static void 2950 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe) 2951 { 2952 struct ixgbe_hw *hw = &ixgbe->hw; 2953 uint32_t i, mrqc; 2954 uint32_t vtctl, vmdctl, vlnctl; 2955 2956 /* 2957 * Initialize RETA/ERETA table 2958 */ 2959 ixgbe_setup_rss_table(ixgbe); 2960 2961 /* 2962 * Enable and setup RSS and VMDq 2963 */ 2964 switch (hw->mac.type) { 2965 case ixgbe_mac_82598EB: 2966 /* 2967 * Enable RSS & Setup RSS Hash functions 2968 */ 2969 mrqc = IXGBE_MRQC_RSSEN | 2970 IXGBE_MRQC_RSS_FIELD_IPV4 | 2971 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2972 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2973 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2974 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2975 IXGBE_MRQC_RSS_FIELD_IPV6 | 2976 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2977 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2978 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2979 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2980 2981 /* 2982 * Enable and Setup VMDq 2983 * VMDq Filter = 0; MAC filtering 2984 * Default VMDq output index = 0; 2985 */ 2986 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2987 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2988 break; 2989 2990 case ixgbe_mac_82599EB: 2991 case ixgbe_mac_X540: 2992 case ixgbe_mac_X550: 2993 case ixgbe_mac_X550EM_x: 2994 case ixgbe_mac_X550EM_a: 2995 /* 2996 * Enable RSS & Setup RSS Hash functions 2997 */ 2998 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 | 2999 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 3000 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 3001 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 3002 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 3003 IXGBE_MRQC_RSS_FIELD_IPV6 | 3004 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 3005 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 3006 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 3007 3008 /* 3009 * Enable VMDq+RSS. 3010 */ 3011 if (ixgbe->num_rx_groups > 32) { 3012 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN; 3013 } else { 3014 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN; 3015 } 3016 3017 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 3018 3019 for (i = 0; i < hw->mac.num_rar_entries; i++) { 3020 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 3021 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 3022 } 3023 break; 3024 3025 default: 3026 break; 3027 3028 } 3029 3030 if (hw->mac.type == ixgbe_mac_82599EB || 3031 hw->mac.type == ixgbe_mac_X540 || 3032 hw->mac.type == ixgbe_mac_X550 || 3033 hw->mac.type == ixgbe_mac_X550EM_x || 3034 hw->mac.type == ixgbe_mac_X550EM_a) { 3035 /* 3036 * Enable Virtualization and Replication. 3037 */ 3038 vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 3039 ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK; 3040 vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 3041 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 3042 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 3043 3044 /* 3045 * Enable VLAN filtering and switching (VFTA and VLVF). 3046 */ 3047 vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3048 vlnctl |= IXGBE_VLNCTRL_VFE; 3049 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl); 3050 ixgbe->vlft_enabled = B_TRUE; 3051 3052 /* 3053 * Enable receiving packets to all VFs 3054 */ 3055 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 3056 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 3057 } 3058 } 3059 3060 /* 3061 * ixgbe_setup_rss_table - Setup RSS table 3062 */ 3063 static void 3064 ixgbe_setup_rss_table(ixgbe_t *ixgbe) 3065 { 3066 struct ixgbe_hw *hw = &ixgbe->hw; 3067 uint32_t i, j; 3068 uint32_t random; 3069 uint32_t reta; 3070 uint32_t ring_per_group; 3071 uint32_t ring; 3072 uint32_t table_size; 3073 uint32_t index_mult; 3074 uint32_t rxcsum; 3075 3076 /* 3077 * Set multiplier for RETA setup and table size based on MAC type. 3078 * RETA table sizes vary by model: 3079 * 3080 * 82598, 82599, X540: 128 table entries. 3081 * X550: 512 table entries. 3082 */ 3083 index_mult = 0x1; 3084 table_size = 128; 3085 switch (ixgbe->hw.mac.type) { 3086 case ixgbe_mac_82598EB: 3087 index_mult = 0x11; 3088 break; 3089 case ixgbe_mac_X550: 3090 case ixgbe_mac_X550EM_x: 3091 case ixgbe_mac_X550EM_a: 3092 table_size = 512; 3093 break; 3094 default: 3095 break; 3096 } 3097 3098 /* 3099 * Fill out RSS redirection table. The configuation of the indices is 3100 * hardware-dependent. 3101 * 3102 * 82598: 8 bits wide containing two 4 bit RSS indices 3103 * 82599, X540: 8 bits wide containing one 4 bit RSS index 3104 * X550: 8 bits wide containing one 6 bit RSS index 3105 */ 3106 reta = 0; 3107 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3108 3109 for (i = 0, j = 0; i < table_size; i++, j++) { 3110 if (j == ring_per_group) j = 0; 3111 3112 /* 3113 * The low 8 bits are for hash value (n+0); 3114 * The next 8 bits are for hash value (n+1), etc. 3115 */ 3116 ring = (j * index_mult); 3117 reta = reta >> 8; 3118 reta = reta | (((uint32_t)ring) << 24); 3119 3120 if ((i & 3) == 3) { 3121 /* 3122 * The first 128 table entries are programmed into the 3123 * RETA register, with any beyond that (eg; on X550) 3124 * into ERETA. 3125 */ 3126 if (i < 128) 3127 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 3128 else 3129 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 3130 reta); 3131 reta = 0; 3132 } 3133 } 3134 3135 /* 3136 * Fill out hash function seeds with a random constant 3137 */ 3138 for (i = 0; i < 10; i++) { 3139 (void) random_get_pseudo_bytes((uint8_t *)&random, 3140 sizeof (uint32_t)); 3141 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 3142 } 3143 3144 /* 3145 * Disable Packet Checksum to enable RSS for multiple receive queues. 3146 * It is an adapter hardware limitation that Packet Checksum is 3147 * mutually exclusive with RSS. 3148 */ 3149 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3150 rxcsum |= IXGBE_RXCSUM_PCSD; 3151 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 3152 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3153 } 3154 3155 /* 3156 * ixgbe_init_unicst - Initialize the unicast addresses. 3157 */ 3158 static void 3159 ixgbe_init_unicst(ixgbe_t *ixgbe) 3160 { 3161 struct ixgbe_hw *hw = &ixgbe->hw; 3162 uint8_t *mac_addr; 3163 int slot; 3164 /* 3165 * Here we should consider two situations: 3166 * 3167 * 1. Chipset is initialized at the first time, 3168 * Clear all the multiple unicast addresses. 3169 * 3170 * 2. Chipset is reset 3171 * Recover the multiple unicast addresses from the 3172 * software data structure to the RAR registers. 3173 */ 3174 if (!ixgbe->unicst_init) { 3175 /* 3176 * Initialize the multiple unicast addresses 3177 */ 3178 ixgbe->unicst_total = hw->mac.num_rar_entries; 3179 ixgbe->unicst_avail = ixgbe->unicst_total; 3180 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3181 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3182 bzero(mac_addr, ETHERADDRL); 3183 (void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0); 3184 ixgbe->unicst_addr[slot].mac.set = 0; 3185 } 3186 ixgbe->unicst_init = B_TRUE; 3187 } else { 3188 /* Re-configure the RAR registers */ 3189 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3190 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3191 if (ixgbe->unicst_addr[slot].mac.set == 1) { 3192 (void) ixgbe_set_rar(hw, slot, mac_addr, 3193 ixgbe->unicst_addr[slot].mac.group_index, 3194 IXGBE_RAH_AV); 3195 } else { 3196 bzero(mac_addr, ETHERADDRL); 3197 (void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0); 3198 } 3199 } 3200 } 3201 } 3202 3203 /* 3204 * ixgbe_unicst_find - Find the slot for the specified unicast address 3205 */ 3206 int 3207 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr) 3208 { 3209 int slot; 3210 3211 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3212 3213 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3214 if (bcmp(ixgbe->unicst_addr[slot].mac.addr, 3215 mac_addr, ETHERADDRL) == 0) 3216 return (slot); 3217 } 3218 3219 return (-1); 3220 } 3221 3222 /* 3223 * Restore the HW state to match the SW state during restart. 3224 */ 3225 static int 3226 ixgbe_init_vlan(ixgbe_t *ixgbe) 3227 { 3228 /* 3229 * The device is starting for the first time; there is nothing 3230 * to do. 3231 */ 3232 if (!ixgbe->vlft_init) { 3233 ixgbe->vlft_init = B_TRUE; 3234 return (IXGBE_SUCCESS); 3235 } 3236 3237 for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) { 3238 int ret; 3239 boolean_t vlvf_bypass; 3240 ixgbe_rx_group_t *rxg = &ixgbe->rx_groups[i]; 3241 struct ixgbe_hw *hw = &ixgbe->hw; 3242 3243 if (rxg->aupe) { 3244 uint32_t vml2flt; 3245 3246 vml2flt = IXGBE_READ_REG(hw, IXGBE_VMOLR(rxg->index)); 3247 vml2flt |= IXGBE_VMOLR_AUPE; 3248 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rxg->index), vml2flt); 3249 } 3250 3251 vlvf_bypass = (rxg->index == ixgbe->rx_def_group); 3252 for (ixgbe_vlan_t *vlp = list_head(&rxg->vlans); vlp != NULL; 3253 vlp = list_next(&rxg->vlans, vlp)) { 3254 ret = ixgbe_set_vfta(hw, vlp->ixvl_vid, rxg->index, 3255 B_TRUE, vlvf_bypass); 3256 3257 if (ret != IXGBE_SUCCESS) { 3258 ixgbe_error(ixgbe, "Failed to program VFTA" 3259 " for group %u, VID: %u, ret: %d.", 3260 rxg->index, vlp->ixvl_vid, ret); 3261 return (IXGBE_FAILURE); 3262 } 3263 } 3264 } 3265 3266 return (IXGBE_SUCCESS); 3267 } 3268 3269 /* 3270 * ixgbe_multicst_add - Add a multicst address. 3271 */ 3272 int 3273 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3274 { 3275 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3276 3277 if ((multiaddr[0] & 01) == 0) { 3278 return (EINVAL); 3279 } 3280 3281 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { 3282 return (ENOENT); 3283 } 3284 3285 bcopy(multiaddr, 3286 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL); 3287 ixgbe->mcast_count++; 3288 3289 /* 3290 * Update the multicast table in the hardware 3291 */ 3292 ixgbe_setup_multicst(ixgbe); 3293 3294 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3295 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3296 return (EIO); 3297 } 3298 3299 return (0); 3300 } 3301 3302 /* 3303 * ixgbe_multicst_remove - Remove a multicst address. 3304 */ 3305 int 3306 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3307 { 3308 int i; 3309 3310 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3311 3312 for (i = 0; i < ixgbe->mcast_count; i++) { 3313 if (bcmp(multiaddr, &ixgbe->mcast_table[i], 3314 ETHERADDRL) == 0) { 3315 for (i++; i < ixgbe->mcast_count; i++) { 3316 ixgbe->mcast_table[i - 1] = 3317 ixgbe->mcast_table[i]; 3318 } 3319 ixgbe->mcast_count--; 3320 break; 3321 } 3322 } 3323 3324 /* 3325 * Update the multicast table in the hardware 3326 */ 3327 ixgbe_setup_multicst(ixgbe); 3328 3329 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3330 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3331 return (EIO); 3332 } 3333 3334 return (0); 3335 } 3336 3337 /* 3338 * ixgbe_setup_multicast - Setup multicast data structures. 3339 * 3340 * This routine initializes all of the multicast related structures 3341 * and save them in the hardware registers. 3342 */ 3343 static void 3344 ixgbe_setup_multicst(ixgbe_t *ixgbe) 3345 { 3346 uint8_t *mc_addr_list; 3347 uint32_t mc_addr_count; 3348 struct ixgbe_hw *hw = &ixgbe->hw; 3349 3350 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3351 3352 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES); 3353 3354 mc_addr_list = (uint8_t *)ixgbe->mcast_table; 3355 mc_addr_count = ixgbe->mcast_count; 3356 3357 /* 3358 * Update the multicast addresses to the MTA registers 3359 */ 3360 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 3361 ixgbe_mc_table_itr, TRUE); 3362 } 3363 3364 /* 3365 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode). 3366 * 3367 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers. 3368 * Different chipsets may have different allowed configuration of vmdq and rss. 3369 */ 3370 static void 3371 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe) 3372 { 3373 struct ixgbe_hw *hw = &ixgbe->hw; 3374 uint32_t ring_per_group; 3375 3376 switch (hw->mac.type) { 3377 case ixgbe_mac_82598EB: 3378 /* 3379 * 82598 supports the following combination: 3380 * vmdq no. x rss no. 3381 * [5..16] x 1 3382 * [1..4] x [1..16] 3383 * However 8 rss queue per pool (vmdq) is sufficient for 3384 * most cases. 3385 */ 3386 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3387 if (ixgbe->num_rx_groups > 4) { 3388 ixgbe->num_rx_rings = ixgbe->num_rx_groups; 3389 } else { 3390 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3391 min(8, ring_per_group); 3392 } 3393 3394 break; 3395 3396 case ixgbe_mac_82599EB: 3397 case ixgbe_mac_X540: 3398 case ixgbe_mac_X550: 3399 case ixgbe_mac_X550EM_x: 3400 case ixgbe_mac_X550EM_a: 3401 /* 3402 * 82599 supports the following combination: 3403 * vmdq no. x rss no. 3404 * [33..64] x [1..2] 3405 * [2..32] x [1..4] 3406 * 1 x [1..16] 3407 * However 8 rss queue per pool (vmdq) is sufficient for 3408 * most cases. 3409 * 3410 * For now, treat X540 and X550 like the 82599. 3411 */ 3412 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3413 if (ixgbe->num_rx_groups == 1) { 3414 ixgbe->num_rx_rings = min(8, ring_per_group); 3415 } else if (ixgbe->num_rx_groups <= 32) { 3416 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3417 min(4, ring_per_group); 3418 } else if (ixgbe->num_rx_groups <= 64) { 3419 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3420 min(2, ring_per_group); 3421 } 3422 break; 3423 3424 default: 3425 break; 3426 } 3427 3428 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3429 3430 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) { 3431 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3432 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) { 3433 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ; 3434 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) { 3435 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS; 3436 } else { 3437 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS; 3438 } 3439 3440 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d", 3441 ixgbe->num_rx_groups, ixgbe->num_rx_rings); 3442 } 3443 3444 /* 3445 * ixgbe_get_conf - Get driver configurations set in driver.conf. 3446 * 3447 * This routine gets user-configured values out of the configuration 3448 * file ixgbe.conf. 3449 * 3450 * For each configurable value, there is a minimum, a maximum, and a 3451 * default. 3452 * If user does not configure a value, use the default. 3453 * If user configures below the minimum, use the minumum. 3454 * If user configures above the maximum, use the maxumum. 3455 */ 3456 static void 3457 ixgbe_get_conf(ixgbe_t *ixgbe) 3458 { 3459 struct ixgbe_hw *hw = &ixgbe->hw; 3460 uint32_t flow_control; 3461 3462 /* 3463 * ixgbe driver supports the following user configurations: 3464 * 3465 * Jumbo frame configuration: 3466 * default_mtu 3467 * 3468 * Ethernet flow control configuration: 3469 * flow_control 3470 * 3471 * Multiple rings configurations: 3472 * tx_queue_number 3473 * tx_ring_size 3474 * rx_queue_number 3475 * rx_ring_size 3476 * 3477 * Call ixgbe_get_prop() to get the value for a specific 3478 * configuration parameter. 3479 */ 3480 3481 /* 3482 * Jumbo frame configuration - max_frame_size controls host buffer 3483 * allocation, so includes MTU, ethernet header, vlan tag and 3484 * frame check sequence. 3485 */ 3486 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, 3487 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU); 3488 3489 ixgbe->max_frame_size = ixgbe->default_mtu + 3490 sizeof (struct ether_vlan_header) + ETHERFCSL; 3491 3492 /* 3493 * Ethernet flow control configuration 3494 */ 3495 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL, 3496 ixgbe_fc_none, 3, ixgbe_fc_none); 3497 if (flow_control == 3) 3498 flow_control = ixgbe_fc_default; 3499 3500 /* 3501 * fc.requested mode is what the user requests. After autoneg, 3502 * fc.current_mode will be the flow_control mode that was negotiated. 3503 */ 3504 hw->fc.requested_mode = flow_control; 3505 3506 /* 3507 * Multiple rings configurations 3508 */ 3509 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM, 3510 ixgbe->capab->min_tx_que_num, 3511 ixgbe->capab->max_tx_que_num, 3512 ixgbe->capab->def_tx_que_num); 3513 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE, 3514 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 3515 3516 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM, 3517 ixgbe->capab->min_rx_que_num, 3518 ixgbe->capab->max_rx_que_num, 3519 ixgbe->capab->def_rx_que_num); 3520 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE, 3521 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 3522 3523 /* 3524 * Multiple groups configuration 3525 */ 3526 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM, 3527 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num, 3528 ixgbe->capab->def_rx_grp_num); 3529 3530 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE, 3531 0, 1, DEFAULT_MR_ENABLE); 3532 3533 if (ixgbe->mr_enable == B_FALSE) { 3534 ixgbe->num_tx_rings = 1; 3535 ixgbe->num_rx_rings = 1; 3536 ixgbe->num_rx_groups = 1; 3537 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3538 } else { 3539 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3540 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1); 3541 /* 3542 * The combination of num_rx_rings and num_rx_groups 3543 * may be not supported by h/w. We need to adjust 3544 * them to appropriate values. 3545 */ 3546 ixgbe_setup_vmdq_rss_conf(ixgbe); 3547 } 3548 3549 /* 3550 * Tunable used to force an interrupt type. The only use is 3551 * for testing of the lesser interrupt types. 3552 * 0 = don't force interrupt type 3553 * 1 = force interrupt type MSI-X 3554 * 2 = force interrupt type MSI 3555 * 3 = force interrupt type Legacy 3556 */ 3557 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE, 3558 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE); 3559 3560 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE, 3561 0, 1, DEFAULT_TX_HCKSUM_ENABLE); 3562 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE, 3563 0, 1, DEFAULT_RX_HCKSUM_ENABLE); 3564 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE, 3565 0, 1, DEFAULT_LSO_ENABLE); 3566 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE, 3567 0, 1, DEFAULT_LRO_ENABLE); 3568 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE, 3569 0, 1, DEFAULT_TX_HEAD_WB_ENABLE); 3570 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe, 3571 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE); 3572 3573 /* Head Write Back not recommended for 82599, X540 and X550 */ 3574 if (hw->mac.type == ixgbe_mac_82599EB || 3575 hw->mac.type == ixgbe_mac_X540 || 3576 hw->mac.type == ixgbe_mac_X550 || 3577 hw->mac.type == ixgbe_mac_X550EM_x || 3578 hw->mac.type == ixgbe_mac_X550EM_a) { 3579 ixgbe->tx_head_wb_enable = B_FALSE; 3580 } 3581 3582 /* 3583 * ixgbe LSO needs the tx h/w checksum support. 3584 * LSO will be disabled if tx h/w checksum is not 3585 * enabled. 3586 */ 3587 if (ixgbe->tx_hcksum_enable == B_FALSE) { 3588 ixgbe->lso_enable = B_FALSE; 3589 } 3590 3591 /* 3592 * ixgbe LRO needs the rx h/w checksum support. 3593 * LRO will be disabled if rx h/w checksum is not 3594 * enabled. 3595 */ 3596 if (ixgbe->rx_hcksum_enable == B_FALSE) { 3597 ixgbe->lro_enable = B_FALSE; 3598 } 3599 3600 /* 3601 * ixgbe LRO only supported by 82599, X540 and X550 3602 */ 3603 if (hw->mac.type == ixgbe_mac_82598EB) { 3604 ixgbe->lro_enable = B_FALSE; 3605 } 3606 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD, 3607 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 3608 DEFAULT_TX_COPY_THRESHOLD); 3609 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe, 3610 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD, 3611 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD); 3612 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe, 3613 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD, 3614 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD); 3615 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe, 3616 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD, 3617 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD); 3618 3619 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD, 3620 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 3621 DEFAULT_RX_COPY_THRESHOLD); 3622 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR, 3623 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 3624 DEFAULT_RX_LIMIT_PER_INTR); 3625 3626 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING, 3627 ixgbe->capab->min_intr_throttle, 3628 ixgbe->capab->max_intr_throttle, 3629 ixgbe->capab->def_intr_throttle); 3630 /* 3631 * 82599, X540 and X550 require the interrupt throttling rate is 3632 * a multiple of 8. This is enforced by the register definiton. 3633 */ 3634 if (hw->mac.type == ixgbe_mac_82599EB || 3635 hw->mac.type == ixgbe_mac_X540 || 3636 hw->mac.type == ixgbe_mac_X550 || 3637 hw->mac.type == ixgbe_mac_X550EM_x || 3638 hw->mac.type == ixgbe_mac_X550EM_a) 3639 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8; 3640 3641 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe, 3642 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP); 3643 } 3644 3645 static void 3646 ixgbe_init_params(ixgbe_t *ixgbe) 3647 { 3648 struct ixgbe_hw *hw = &ixgbe->hw; 3649 ixgbe_link_speed speeds_supported = 0; 3650 bool negotiate; 3651 3652 /* 3653 * Get a list of speeds the adapter supports. If the hw struct hasn't 3654 * been populated with this information yet, retrieve it from the 3655 * adapter and save it to our own variable. 3656 * 3657 * On certain adapters, such as ones which use SFPs, the contents of 3658 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not 3659 * updated, so we must rely on calling ixgbe_get_link_capabilities() 3660 * in order to ascertain the speeds which we are capable of supporting, 3661 * and in the case of SFP-equipped adapters, which speed we are 3662 * advertising. If ixgbe_get_link_capabilities() fails for some reason, 3663 * we'll go with a default list of speeds as a last resort. 3664 */ 3665 speeds_supported = hw->phy.speeds_supported; 3666 3667 if (speeds_supported == 0) { 3668 if (ixgbe_get_link_capabilities(hw, &speeds_supported, 3669 &negotiate) != IXGBE_SUCCESS) { 3670 if (hw->mac.type == ixgbe_mac_82598EB) { 3671 speeds_supported = 3672 IXGBE_LINK_SPEED_82598_AUTONEG; 3673 } else { 3674 speeds_supported = 3675 IXGBE_LINK_SPEED_82599_AUTONEG; 3676 } 3677 } 3678 } 3679 ixgbe->speeds_supported = speeds_supported; 3680 3681 /* 3682 * By default, all supported speeds are enabled and advertised. 3683 */ 3684 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) { 3685 ixgbe->param_en_10000fdx_cap = 1; 3686 ixgbe->param_adv_10000fdx_cap = 1; 3687 } else { 3688 ixgbe->param_en_10000fdx_cap = 0; 3689 ixgbe->param_adv_10000fdx_cap = 0; 3690 } 3691 3692 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) { 3693 ixgbe->param_en_5000fdx_cap = 1; 3694 ixgbe->param_adv_5000fdx_cap = 1; 3695 } else { 3696 ixgbe->param_en_5000fdx_cap = 0; 3697 ixgbe->param_adv_5000fdx_cap = 0; 3698 } 3699 3700 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) { 3701 ixgbe->param_en_2500fdx_cap = 1; 3702 ixgbe->param_adv_2500fdx_cap = 1; 3703 } else { 3704 ixgbe->param_en_2500fdx_cap = 0; 3705 ixgbe->param_adv_2500fdx_cap = 0; 3706 } 3707 3708 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) { 3709 ixgbe->param_en_1000fdx_cap = 1; 3710 ixgbe->param_adv_1000fdx_cap = 1; 3711 } else { 3712 ixgbe->param_en_1000fdx_cap = 0; 3713 ixgbe->param_adv_1000fdx_cap = 0; 3714 } 3715 3716 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) { 3717 ixgbe->param_en_100fdx_cap = 1; 3718 ixgbe->param_adv_100fdx_cap = 1; 3719 } else { 3720 ixgbe->param_en_100fdx_cap = 0; 3721 ixgbe->param_adv_100fdx_cap = 0; 3722 } 3723 3724 ixgbe->param_pause_cap = 1; 3725 ixgbe->param_asym_pause_cap = 1; 3726 ixgbe->param_rem_fault = 0; 3727 3728 ixgbe->param_adv_autoneg_cap = 1; 3729 ixgbe->param_adv_pause_cap = 1; 3730 ixgbe->param_adv_asym_pause_cap = 1; 3731 ixgbe->param_adv_rem_fault = 0; 3732 3733 ixgbe->param_lp_10000fdx_cap = 0; 3734 ixgbe->param_lp_5000fdx_cap = 0; 3735 ixgbe->param_lp_2500fdx_cap = 0; 3736 ixgbe->param_lp_1000fdx_cap = 0; 3737 ixgbe->param_lp_100fdx_cap = 0; 3738 ixgbe->param_lp_autoneg_cap = 0; 3739 ixgbe->param_lp_pause_cap = 0; 3740 ixgbe->param_lp_asym_pause_cap = 0; 3741 ixgbe->param_lp_rem_fault = 0; 3742 } 3743 3744 /* 3745 * ixgbe_get_prop - Get a property value out of the configuration file 3746 * ixgbe.conf. 3747 * 3748 * Caller provides the name of the property, a default value, a minimum 3749 * value, and a maximum value. 3750 * 3751 * Return configured value of the property, with default, minimum and 3752 * maximum properly applied. 3753 */ 3754 static int 3755 ixgbe_get_prop(ixgbe_t *ixgbe, 3756 char *propname, /* name of the property */ 3757 int minval, /* minimum acceptable value */ 3758 int maxval, /* maximim acceptable value */ 3759 int defval) /* default value */ 3760 { 3761 int value; 3762 3763 /* 3764 * Call ddi_prop_get_int() to read the conf settings 3765 */ 3766 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip, 3767 DDI_PROP_DONTPASS, propname, defval); 3768 if (value > maxval) 3769 value = maxval; 3770 3771 if (value < minval) 3772 value = minval; 3773 3774 return (value); 3775 } 3776 3777 /* 3778 * ixgbe_driver_setup_link - Using the link properties to setup the link. 3779 */ 3780 int 3781 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 3782 { 3783 struct ixgbe_hw *hw = &ixgbe->hw; 3784 ixgbe_link_speed advertised = 0; 3785 3786 /* 3787 * Assemble a list of enabled speeds to auto-negotiate with. 3788 */ 3789 if (ixgbe->param_en_10000fdx_cap == 1) 3790 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 3791 3792 if (ixgbe->param_en_5000fdx_cap == 1) 3793 advertised |= IXGBE_LINK_SPEED_5GB_FULL; 3794 3795 if (ixgbe->param_en_2500fdx_cap == 1) 3796 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; 3797 3798 if (ixgbe->param_en_1000fdx_cap == 1) 3799 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 3800 3801 if (ixgbe->param_en_100fdx_cap == 1) 3802 advertised |= IXGBE_LINK_SPEED_100_FULL; 3803 3804 /* 3805 * As a last resort, autoneg with a default list of speeds. 3806 */ 3807 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) { 3808 ixgbe_notice(ixgbe, "Invalid link settings. Setting link " 3809 "to autonegotiate with full capabilities."); 3810 3811 if (hw->mac.type == ixgbe_mac_82598EB) 3812 advertised = IXGBE_LINK_SPEED_82598_AUTONEG; 3813 else 3814 advertised = IXGBE_LINK_SPEED_82599_AUTONEG; 3815 } 3816 3817 if (setup_hw) { 3818 if (ixgbe_setup_link(&ixgbe->hw, advertised, 3819 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) { 3820 ixgbe_notice(ixgbe, "Setup link failed on this " 3821 "device."); 3822 return (IXGBE_FAILURE); 3823 } 3824 } 3825 3826 return (IXGBE_SUCCESS); 3827 } 3828 3829 /* 3830 * ixgbe_driver_link_check - Link status processing. 3831 * 3832 * This function can be called in both kernel context and interrupt context 3833 */ 3834 static void 3835 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3836 { 3837 struct ixgbe_hw *hw = &ixgbe->hw; 3838 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3839 bool link_up = false; 3840 boolean_t link_changed = B_FALSE; 3841 3842 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3843 3844 (void) ixgbe_check_link(hw, &speed, &link_up, false); 3845 if (link_up) { 3846 ixgbe->link_check_complete = B_TRUE; 3847 3848 /* Link is up, enable flow control settings */ 3849 (void) ixgbe_fc_enable(hw); 3850 3851 /* 3852 * The Link is up, check whether it was marked as down earlier 3853 */ 3854 if (ixgbe->link_state != LINK_STATE_UP) { 3855 switch (speed) { 3856 case IXGBE_LINK_SPEED_10GB_FULL: 3857 ixgbe->link_speed = SPEED_10GB; 3858 break; 3859 case IXGBE_LINK_SPEED_5GB_FULL: 3860 ixgbe->link_speed = SPEED_5GB; 3861 break; 3862 case IXGBE_LINK_SPEED_2_5GB_FULL: 3863 ixgbe->link_speed = SPEED_2_5GB; 3864 break; 3865 case IXGBE_LINK_SPEED_1GB_FULL: 3866 ixgbe->link_speed = SPEED_1GB; 3867 break; 3868 case IXGBE_LINK_SPEED_100_FULL: 3869 ixgbe->link_speed = SPEED_100; 3870 } 3871 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3872 ixgbe->link_state = LINK_STATE_UP; 3873 link_changed = B_TRUE; 3874 } 3875 } else { 3876 if (ixgbe->link_check_complete == B_TRUE || 3877 (ixgbe->link_check_complete == B_FALSE && 3878 gethrtime() >= ixgbe->link_check_hrtime)) { 3879 /* 3880 * The link is really down 3881 */ 3882 ixgbe->link_check_complete = B_TRUE; 3883 3884 if (ixgbe->link_state != LINK_STATE_DOWN) { 3885 ixgbe->link_speed = 0; 3886 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3887 ixgbe->link_state = LINK_STATE_DOWN; 3888 link_changed = B_TRUE; 3889 } 3890 } 3891 } 3892 3893 /* 3894 * If we are in an interrupt context, need to re-enable the 3895 * interrupt, which was automasked 3896 */ 3897 if (servicing_interrupt() != 0) { 3898 ixgbe->eims |= IXGBE_EICR_LSC; 3899 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3900 } 3901 3902 if (link_changed) { 3903 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3904 } 3905 } 3906 3907 /* 3908 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599. 3909 */ 3910 static void 3911 ixgbe_sfp_check(void *arg) 3912 { 3913 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3914 uint32_t eicr = ixgbe->eicr; 3915 struct ixgbe_hw *hw = &ixgbe->hw; 3916 3917 mutex_enter(&ixgbe->gen_lock); 3918 (void) hw->phy.ops.identify_sfp(hw); 3919 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 3920 /* clear the interrupt */ 3921 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3922 3923 /* if link up, do multispeed fiber setup */ 3924 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3925 true); 3926 ixgbe_driver_link_check(ixgbe); 3927 ixgbe_get_hw_state(ixgbe); 3928 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) { 3929 /* clear the interrupt */ 3930 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw)); 3931 3932 /* if link up, do sfp module setup */ 3933 (void) hw->mac.ops.setup_sfp(hw); 3934 3935 /* do multispeed fiber setup */ 3936 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3937 true); 3938 ixgbe_driver_link_check(ixgbe); 3939 ixgbe_get_hw_state(ixgbe); 3940 } 3941 mutex_exit(&ixgbe->gen_lock); 3942 3943 /* 3944 * We need to fully re-check the link later. 3945 */ 3946 ixgbe->link_check_complete = B_FALSE; 3947 ixgbe->link_check_hrtime = gethrtime() + 3948 (IXGBE_LINK_UP_TIME * 100000000ULL); 3949 } 3950 3951 /* 3952 * ixgbe_overtemp_check - overtemp module processing done in taskq 3953 * 3954 * This routine will only be called on adapters with temperature sensor. 3955 * The indication of over-temperature can be either SDP0 interrupt or the link 3956 * status change interrupt. 3957 */ 3958 static void 3959 ixgbe_overtemp_check(void *arg) 3960 { 3961 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3962 struct ixgbe_hw *hw = &ixgbe->hw; 3963 uint32_t eicr = ixgbe->eicr; 3964 ixgbe_link_speed speed; 3965 bool link_up; 3966 3967 mutex_enter(&ixgbe->gen_lock); 3968 3969 /* make sure we know current state of link */ 3970 (void) ixgbe_check_link(hw, &speed, &link_up, false); 3971 3972 /* check over-temp condition */ 3973 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) || 3974 (eicr & IXGBE_EICR_LSC)) { 3975 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) { 3976 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3977 3978 /* 3979 * Disable the adapter interrupts 3980 */ 3981 ixgbe_disable_adapter_interrupts(ixgbe); 3982 3983 /* 3984 * Disable Rx/Tx units 3985 */ 3986 (void) ixgbe_stop_adapter(hw); 3987 3988 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3989 ixgbe_error(ixgbe, 3990 "Problem: Network adapter has been stopped " 3991 "because it has overheated"); 3992 ixgbe_error(ixgbe, 3993 "Action: Restart the computer. " 3994 "If the problem persists, power off the system " 3995 "and replace the adapter"); 3996 } 3997 } 3998 3999 /* write to clear the interrupt */ 4000 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 4001 4002 mutex_exit(&ixgbe->gen_lock); 4003 } 4004 4005 /* 4006 * ixgbe_phy_check - taskq to process interrupts from an external PHY 4007 * 4008 * This routine will only be called on adapters with external PHYs 4009 * (such as X550) that may be trying to raise our attention to some event. 4010 * Currently, this is limited to claiming PHY overtemperature and link status 4011 * change (LSC) events, however this may expand to include other things in 4012 * future adapters. 4013 */ 4014 static void 4015 ixgbe_phy_check(void *arg) 4016 { 4017 ixgbe_t *ixgbe = (ixgbe_t *)arg; 4018 struct ixgbe_hw *hw = &ixgbe->hw; 4019 int rv; 4020 4021 mutex_enter(&ixgbe->gen_lock); 4022 4023 /* 4024 * X550 baseT PHY overtemp and LSC events are handled here. 4025 * 4026 * If an overtemp event occurs, it will be reflected in the 4027 * return value of phy.ops.handle_lasi() and the common code will 4028 * automatically power off the baseT PHY. This is our cue to trigger 4029 * an FMA event. 4030 * 4031 * If a link status change event occurs, phy.ops.handle_lasi() will 4032 * automatically initiate a link setup between the integrated KR PHY 4033 * and the external X557 PHY to ensure that the link speed between 4034 * them matches the link speed of the baseT link. 4035 */ 4036 rv = ixgbe_handle_lasi(hw); 4037 4038 if (rv == IXGBE_ERR_OVERTEMP) { 4039 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 4040 4041 /* 4042 * Disable the adapter interrupts 4043 */ 4044 ixgbe_disable_adapter_interrupts(ixgbe); 4045 4046 /* 4047 * Disable Rx/Tx units 4048 */ 4049 (void) ixgbe_stop_adapter(hw); 4050 4051 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 4052 ixgbe_error(ixgbe, 4053 "Problem: Network adapter has been stopped due to a " 4054 "overtemperature event being detected."); 4055 ixgbe_error(ixgbe, 4056 "Action: Shut down or restart the computer. If the issue " 4057 "persists, please take action in accordance with the " 4058 "recommendations from your system vendor."); 4059 } 4060 4061 mutex_exit(&ixgbe->gen_lock); 4062 } 4063 4064 /* 4065 * ixgbe_link_timer - timer for link status detection 4066 */ 4067 static void 4068 ixgbe_link_timer(void *arg) 4069 { 4070 ixgbe_t *ixgbe = (ixgbe_t *)arg; 4071 4072 mutex_enter(&ixgbe->gen_lock); 4073 ixgbe_driver_link_check(ixgbe); 4074 mutex_exit(&ixgbe->gen_lock); 4075 } 4076 4077 /* 4078 * ixgbe_local_timer - Driver watchdog function. 4079 * 4080 * This function will handle the transmit stall check and other routines. 4081 */ 4082 static void 4083 ixgbe_local_timer(void *arg) 4084 { 4085 ixgbe_t *ixgbe = (ixgbe_t *)arg; 4086 4087 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP) 4088 goto out; 4089 4090 if (ixgbe->ixgbe_state & IXGBE_ERROR) { 4091 ixgbe->reset_count++; 4092 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 4093 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 4094 goto out; 4095 } 4096 4097 if (ixgbe_stall_check(ixgbe)) { 4098 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL); 4099 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4100 4101 ixgbe->reset_count++; 4102 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 4103 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 4104 } 4105 4106 out: 4107 ixgbe_restart_watchdog_timer(ixgbe); 4108 } 4109 4110 /* 4111 * ixgbe_stall_check - Check for transmit stall. 4112 * 4113 * This function checks if the adapter is stalled (in transmit). 4114 * 4115 * It is called each time the watchdog timeout is invoked. 4116 * If the transmit descriptor reclaim continuously fails, 4117 * the watchdog value will increment by 1. If the watchdog 4118 * value exceeds the threshold, the ixgbe is assumed to 4119 * have stalled and need to be reset. 4120 */ 4121 static boolean_t 4122 ixgbe_stall_check(ixgbe_t *ixgbe) 4123 { 4124 ixgbe_tx_ring_t *tx_ring; 4125 boolean_t result; 4126 int i; 4127 4128 if (ixgbe->link_state != LINK_STATE_UP) 4129 return (B_FALSE); 4130 4131 /* 4132 * If any tx ring is stalled, we'll reset the chipset 4133 */ 4134 result = B_FALSE; 4135 for (i = 0; i < ixgbe->num_tx_rings; i++) { 4136 tx_ring = &ixgbe->tx_rings[i]; 4137 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) { 4138 tx_ring->tx_recycle(tx_ring); 4139 } 4140 4141 if (tx_ring->recycle_fail > 0) 4142 tx_ring->stall_watchdog++; 4143 else 4144 tx_ring->stall_watchdog = 0; 4145 4146 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 4147 result = B_TRUE; 4148 break; 4149 } 4150 } 4151 4152 if (result) { 4153 tx_ring->stall_watchdog = 0; 4154 tx_ring->recycle_fail = 0; 4155 } 4156 4157 return (result); 4158 } 4159 4160 4161 /* 4162 * is_valid_mac_addr - Check if the mac address is valid. 4163 */ 4164 static boolean_t 4165 is_valid_mac_addr(uint8_t *mac_addr) 4166 { 4167 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 4168 const uint8_t addr_test2[6] = 4169 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4170 4171 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4172 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4173 return (B_FALSE); 4174 4175 return (B_TRUE); 4176 } 4177 4178 static boolean_t 4179 ixgbe_find_mac_address(ixgbe_t *ixgbe) 4180 { 4181 #ifdef __sparc 4182 struct ixgbe_hw *hw = &ixgbe->hw; 4183 uchar_t *bytes; 4184 struct ether_addr sysaddr; 4185 uint_t nelts; 4186 int err; 4187 boolean_t found = B_FALSE; 4188 4189 /* 4190 * The "vendor's factory-set address" may already have 4191 * been extracted from the chip, but if the property 4192 * "local-mac-address" is set we use that instead. 4193 * 4194 * We check whether it looks like an array of 6 4195 * bytes (which it should, if OBP set it). If we can't 4196 * make sense of it this way, we'll ignore it. 4197 */ 4198 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4199 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 4200 if (err == DDI_PROP_SUCCESS) { 4201 if (nelts == ETHERADDRL) { 4202 while (nelts--) 4203 hw->mac.addr[nelts] = bytes[nelts]; 4204 found = B_TRUE; 4205 } 4206 ddi_prop_free(bytes); 4207 } 4208 4209 /* 4210 * Look up the OBP property "local-mac-address?". If the user has set 4211 * 'local-mac-address? = false', use "the system address" instead. 4212 */ 4213 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 4214 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 4215 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 4216 if (localetheraddr(NULL, &sysaddr) != 0) { 4217 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 4218 found = B_TRUE; 4219 } 4220 } 4221 ddi_prop_free(bytes); 4222 } 4223 4224 /* 4225 * Finally(!), if there's a valid "mac-address" property (created 4226 * if we netbooted from this interface), we must use this instead 4227 * of any of the above to ensure that the NFS/install server doesn't 4228 * get confused by the address changing as illumos takes over! 4229 */ 4230 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4231 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 4232 if (err == DDI_PROP_SUCCESS) { 4233 if (nelts == ETHERADDRL) { 4234 while (nelts--) 4235 hw->mac.addr[nelts] = bytes[nelts]; 4236 found = B_TRUE; 4237 } 4238 ddi_prop_free(bytes); 4239 } 4240 4241 if (found) { 4242 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 4243 return (B_TRUE); 4244 } 4245 #else 4246 _NOTE(ARGUNUSED(ixgbe)); 4247 #endif 4248 4249 return (B_TRUE); 4250 } 4251 4252 static void 4253 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 4254 { 4255 /* 4256 * Fire a watchdog timer 4257 */ 4258 ixgbe->watchdog_tid = 4259 timeout(ixgbe_local_timer, 4260 (void *)ixgbe, 1 * drv_usectohz(1000000)); 4261 4262 } 4263 4264 /* 4265 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 4266 */ 4267 void 4268 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 4269 { 4270 mutex_enter(&ixgbe->watchdog_lock); 4271 4272 if (!ixgbe->watchdog_enable) { 4273 ixgbe->watchdog_enable = B_TRUE; 4274 ixgbe->watchdog_start = B_TRUE; 4275 ixgbe_arm_watchdog_timer(ixgbe); 4276 } 4277 4278 mutex_exit(&ixgbe->watchdog_lock); 4279 } 4280 4281 /* 4282 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 4283 */ 4284 void 4285 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 4286 { 4287 timeout_id_t tid; 4288 4289 mutex_enter(&ixgbe->watchdog_lock); 4290 4291 ixgbe->watchdog_enable = B_FALSE; 4292 ixgbe->watchdog_start = B_FALSE; 4293 tid = ixgbe->watchdog_tid; 4294 ixgbe->watchdog_tid = 0; 4295 4296 mutex_exit(&ixgbe->watchdog_lock); 4297 4298 if (tid != 0) 4299 (void) untimeout(tid); 4300 } 4301 4302 /* 4303 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 4304 */ 4305 void 4306 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 4307 { 4308 mutex_enter(&ixgbe->watchdog_lock); 4309 4310 if (ixgbe->watchdog_enable) { 4311 if (!ixgbe->watchdog_start) { 4312 ixgbe->watchdog_start = B_TRUE; 4313 ixgbe_arm_watchdog_timer(ixgbe); 4314 } 4315 } 4316 4317 mutex_exit(&ixgbe->watchdog_lock); 4318 } 4319 4320 /* 4321 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 4322 */ 4323 static void 4324 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 4325 { 4326 mutex_enter(&ixgbe->watchdog_lock); 4327 4328 if (ixgbe->watchdog_start) 4329 ixgbe_arm_watchdog_timer(ixgbe); 4330 4331 mutex_exit(&ixgbe->watchdog_lock); 4332 } 4333 4334 /* 4335 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 4336 */ 4337 void 4338 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 4339 { 4340 timeout_id_t tid; 4341 4342 mutex_enter(&ixgbe->watchdog_lock); 4343 4344 ixgbe->watchdog_start = B_FALSE; 4345 tid = ixgbe->watchdog_tid; 4346 ixgbe->watchdog_tid = 0; 4347 4348 mutex_exit(&ixgbe->watchdog_lock); 4349 4350 if (tid != 0) 4351 (void) untimeout(tid); 4352 } 4353 4354 /* 4355 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 4356 */ 4357 static void 4358 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 4359 { 4360 struct ixgbe_hw *hw = &ixgbe->hw; 4361 4362 /* 4363 * mask all interrupts off 4364 */ 4365 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 4366 4367 /* 4368 * for MSI-X, also disable autoclear 4369 */ 4370 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4371 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 4372 } 4373 4374 IXGBE_WRITE_FLUSH(hw); 4375 } 4376 4377 /* 4378 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 4379 */ 4380 static void 4381 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 4382 { 4383 struct ixgbe_hw *hw = &ixgbe->hw; 4384 uint32_t eiac, eiam; 4385 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4386 4387 /* interrupt types to enable */ 4388 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 4389 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 4390 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */ 4391 4392 /* enable automask on "other" causes that this adapter can generate */ 4393 eiam = ixgbe->capab->other_intr; 4394 4395 /* 4396 * msi-x mode 4397 */ 4398 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4399 /* enable autoclear but not on bits 29:20 */ 4400 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR); 4401 4402 /* general purpose interrupt enable */ 4403 gpie |= (IXGBE_GPIE_MSIX_MODE 4404 | IXGBE_GPIE_PBA_SUPPORT 4405 | IXGBE_GPIE_OCD 4406 | IXGBE_GPIE_EIAME); 4407 /* 4408 * non-msi-x mode 4409 */ 4410 } else { 4411 4412 /* disable autoclear, leave gpie at default */ 4413 eiac = 0; 4414 4415 /* 4416 * General purpose interrupt enable. 4417 * For 82599, X540 and X550, extended interrupt 4418 * automask enable only in MSI or MSI-X mode 4419 */ 4420 if ((hw->mac.type == ixgbe_mac_82598EB) || 4421 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) { 4422 gpie |= IXGBE_GPIE_EIAME; 4423 } 4424 } 4425 4426 /* Enable specific "other" interrupt types */ 4427 switch (hw->mac.type) { 4428 case ixgbe_mac_82598EB: 4429 gpie |= ixgbe->capab->other_gpie; 4430 break; 4431 4432 case ixgbe_mac_82599EB: 4433 case ixgbe_mac_X540: 4434 case ixgbe_mac_X550: 4435 case ixgbe_mac_X550EM_x: 4436 case ixgbe_mac_X550EM_a: 4437 gpie |= ixgbe->capab->other_gpie; 4438 4439 /* Enable RSC Delay 8us when LRO enabled */ 4440 if (ixgbe->lro_enable) { 4441 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT); 4442 } 4443 break; 4444 4445 default: 4446 break; 4447 } 4448 4449 /* write to interrupt control registers */ 4450 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4451 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 4452 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam); 4453 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 4454 IXGBE_WRITE_FLUSH(hw); 4455 } 4456 4457 /* 4458 * ixgbe_loopback_ioctl - Loopback support. 4459 */ 4460 enum ioc_reply 4461 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 4462 { 4463 lb_info_sz_t *lbsp; 4464 lb_property_t *lbpp; 4465 uint32_t *lbmp; 4466 uint32_t size; 4467 uint32_t value; 4468 4469 if (mp->b_cont == NULL) 4470 return (IOC_INVAL); 4471 4472 switch (iocp->ioc_cmd) { 4473 default: 4474 return (IOC_INVAL); 4475 4476 case LB_GET_INFO_SIZE: 4477 size = sizeof (lb_info_sz_t); 4478 if (iocp->ioc_count != size) 4479 return (IOC_INVAL); 4480 4481 value = sizeof (lb_normal); 4482 value += sizeof (lb_mac); 4483 value += sizeof (lb_external); 4484 4485 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 4486 *lbsp = value; 4487 break; 4488 4489 case LB_GET_INFO: 4490 value = sizeof (lb_normal); 4491 value += sizeof (lb_mac); 4492 value += sizeof (lb_external); 4493 4494 size = value; 4495 if (iocp->ioc_count != size) 4496 return (IOC_INVAL); 4497 4498 value = 0; 4499 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 4500 4501 lbpp[value++] = lb_normal; 4502 lbpp[value++] = lb_mac; 4503 lbpp[value++] = lb_external; 4504 break; 4505 4506 case LB_GET_MODE: 4507 size = sizeof (uint32_t); 4508 if (iocp->ioc_count != size) 4509 return (IOC_INVAL); 4510 4511 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4512 *lbmp = ixgbe->loopback_mode; 4513 break; 4514 4515 case LB_SET_MODE: 4516 size = 0; 4517 if (iocp->ioc_count != sizeof (uint32_t)) 4518 return (IOC_INVAL); 4519 4520 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4521 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 4522 return (IOC_INVAL); 4523 break; 4524 } 4525 4526 iocp->ioc_count = size; 4527 iocp->ioc_error = 0; 4528 4529 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4530 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4531 return (IOC_INVAL); 4532 } 4533 4534 return (IOC_REPLY); 4535 } 4536 4537 /* 4538 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 4539 */ 4540 static boolean_t 4541 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 4542 { 4543 if (mode == ixgbe->loopback_mode) 4544 return (B_TRUE); 4545 4546 ixgbe->loopback_mode = mode; 4547 4548 if (mode == IXGBE_LB_NONE) { 4549 /* 4550 * Reset the chip 4551 */ 4552 (void) ixgbe_reset(ixgbe); 4553 return (B_TRUE); 4554 } 4555 4556 mutex_enter(&ixgbe->gen_lock); 4557 4558 switch (mode) { 4559 default: 4560 mutex_exit(&ixgbe->gen_lock); 4561 return (B_FALSE); 4562 4563 case IXGBE_LB_EXTERNAL: 4564 break; 4565 4566 case IXGBE_LB_INTERNAL_MAC: 4567 ixgbe_set_internal_mac_loopback(ixgbe); 4568 break; 4569 } 4570 4571 mutex_exit(&ixgbe->gen_lock); 4572 4573 return (B_TRUE); 4574 } 4575 4576 /* 4577 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 4578 */ 4579 static void 4580 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 4581 { 4582 struct ixgbe_hw *hw; 4583 uint32_t reg; 4584 uint8_t atlas; 4585 4586 hw = &ixgbe->hw; 4587 4588 /* 4589 * Setup MAC loopback 4590 */ 4591 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 4592 reg |= IXGBE_HLREG0_LPBK; 4593 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 4594 4595 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4596 reg &= ~IXGBE_AUTOC_LMS_MASK; 4597 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4598 4599 /* 4600 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 4601 */ 4602 switch (hw->mac.type) { 4603 case ixgbe_mac_82598EB: 4604 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4605 &atlas); 4606 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 4607 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4608 atlas); 4609 4610 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4611 &atlas); 4612 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 4613 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4614 atlas); 4615 4616 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4617 &atlas); 4618 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 4619 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4620 atlas); 4621 4622 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4623 &atlas); 4624 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 4625 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4626 atlas); 4627 break; 4628 4629 case ixgbe_mac_82599EB: 4630 case ixgbe_mac_X540: 4631 case ixgbe_mac_X550: 4632 case ixgbe_mac_X550EM_x: 4633 case ixgbe_mac_X550EM_a: 4634 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4635 reg |= (IXGBE_AUTOC_FLU | 4636 IXGBE_AUTOC_10G_KX4); 4637 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4638 4639 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL, 4640 false); 4641 break; 4642 4643 default: 4644 break; 4645 } 4646 } 4647 4648 /* 4649 * ixgbe_intr_rx_work - RX processing of ISR. 4650 */ 4651 static void 4652 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 4653 { 4654 mblk_t *mp; 4655 4656 mutex_enter(&rx_ring->rx_lock); 4657 4658 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4659 mutex_exit(&rx_ring->rx_lock); 4660 4661 if (mp != NULL) 4662 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4663 rx_ring->ring_gen_num); 4664 } 4665 4666 /* 4667 * ixgbe_intr_tx_work - TX processing of ISR. 4668 */ 4669 static void 4670 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 4671 { 4672 ixgbe_t *ixgbe = tx_ring->ixgbe; 4673 4674 /* 4675 * Recycle the tx descriptors 4676 */ 4677 tx_ring->tx_recycle(tx_ring); 4678 4679 /* 4680 * Schedule the re-transmit 4681 */ 4682 if (tx_ring->reschedule && 4683 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) { 4684 tx_ring->reschedule = B_FALSE; 4685 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl, 4686 tx_ring->ring_handle); 4687 tx_ring->stat_reschedule++; 4688 } 4689 } 4690 4691 /* 4692 * ixgbe_intr_other_work - Process interrupt types other than tx/rx 4693 */ 4694 static void 4695 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr) 4696 { 4697 struct ixgbe_hw *hw = &ixgbe->hw; 4698 4699 ASSERT(mutex_owned(&ixgbe->gen_lock)); 4700 4701 /* 4702 * handle link status change 4703 */ 4704 if (eicr & IXGBE_EICR_LSC) { 4705 ixgbe_driver_link_check(ixgbe); 4706 ixgbe_get_hw_state(ixgbe); 4707 } 4708 4709 /* 4710 * check for fan failure on adapters with fans 4711 */ 4712 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 4713 (eicr & IXGBE_EICR_GPI_SDP1)) { 4714 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 4715 4716 /* 4717 * Disable the adapter interrupts 4718 */ 4719 ixgbe_disable_adapter_interrupts(ixgbe); 4720 4721 /* 4722 * Disable Rx/Tx units 4723 */ 4724 (void) ixgbe_stop_adapter(&ixgbe->hw); 4725 4726 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 4727 ixgbe_error(ixgbe, 4728 "Problem: Network adapter has been stopped " 4729 "because the fan has stopped.\n"); 4730 ixgbe_error(ixgbe, 4731 "Action: Replace the adapter.\n"); 4732 4733 /* re-enable the interrupt, which was automasked */ 4734 ixgbe->eims |= IXGBE_EICR_GPI_SDP1; 4735 } 4736 4737 /* 4738 * Do SFP check for adapters with hot-plug capability 4739 */ 4740 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) && 4741 ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) || 4742 (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) { 4743 ixgbe->eicr = eicr; 4744 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq, 4745 ixgbe_sfp_check, (void *)ixgbe, 4746 DDI_NOSLEEP)) != DDI_SUCCESS) { 4747 ixgbe_log(ixgbe, "No memory available to dispatch " 4748 "taskq for SFP check"); 4749 } 4750 } 4751 4752 /* 4753 * Do over-temperature check for adapters with temp sensor 4754 */ 4755 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) && 4756 ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) || 4757 (eicr & IXGBE_EICR_LSC))) { 4758 ixgbe->eicr = eicr; 4759 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq, 4760 ixgbe_overtemp_check, (void *)ixgbe, 4761 DDI_NOSLEEP)) != DDI_SUCCESS) { 4762 ixgbe_log(ixgbe, "No memory available to dispatch " 4763 "taskq for overtemp check"); 4764 } 4765 } 4766 4767 /* 4768 * Process an external PHY interrupt 4769 */ 4770 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 4771 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 4772 ixgbe->eicr = eicr; 4773 if ((ddi_taskq_dispatch(ixgbe->phy_taskq, 4774 ixgbe_phy_check, (void *)ixgbe, 4775 DDI_NOSLEEP)) != DDI_SUCCESS) { 4776 ixgbe_log(ixgbe, "No memory available to dispatch " 4777 "taskq for PHY check"); 4778 } 4779 } 4780 } 4781 4782 /* 4783 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 4784 */ 4785 static uint_t 4786 ixgbe_intr_legacy(void *arg1, void *arg2) 4787 { 4788 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4789 struct ixgbe_hw *hw = &ixgbe->hw; 4790 ixgbe_tx_ring_t *tx_ring; 4791 ixgbe_rx_ring_t *rx_ring; 4792 uint32_t eicr; 4793 mblk_t *mp; 4794 boolean_t tx_reschedule; 4795 uint_t result; 4796 4797 _NOTE(ARGUNUSED(arg2)); 4798 4799 mutex_enter(&ixgbe->gen_lock); 4800 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 4801 mutex_exit(&ixgbe->gen_lock); 4802 return (DDI_INTR_UNCLAIMED); 4803 } 4804 4805 mp = NULL; 4806 tx_reschedule = B_FALSE; 4807 4808 /* 4809 * Any bit set in eicr: claim this interrupt 4810 */ 4811 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4812 4813 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4814 mutex_exit(&ixgbe->gen_lock); 4815 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4816 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4817 return (DDI_INTR_CLAIMED); 4818 } 4819 4820 if (eicr) { 4821 /* 4822 * For legacy interrupt, we have only one interrupt, 4823 * so we have only one rx ring and one tx ring enabled. 4824 */ 4825 ASSERT(ixgbe->num_rx_rings == 1); 4826 ASSERT(ixgbe->num_tx_rings == 1); 4827 4828 /* 4829 * For legacy interrupt, rx rings[0] will use RTxQ[0]. 4830 */ 4831 if (eicr & 0x1) { 4832 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE; 4833 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4834 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4835 /* 4836 * Clean the rx descriptors 4837 */ 4838 rx_ring = &ixgbe->rx_rings[0]; 4839 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4840 } 4841 4842 /* 4843 * For legacy interrupt, tx rings[0] will use RTxQ[1]. 4844 */ 4845 if (eicr & 0x2) { 4846 /* 4847 * Recycle the tx descriptors 4848 */ 4849 tx_ring = &ixgbe->tx_rings[0]; 4850 tx_ring->tx_recycle(tx_ring); 4851 4852 /* 4853 * Schedule the re-transmit 4854 */ 4855 tx_reschedule = (tx_ring->reschedule && 4856 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)); 4857 } 4858 4859 /* any interrupt type other than tx/rx */ 4860 if (eicr & ixgbe->capab->other_intr) { 4861 switch (hw->mac.type) { 4862 case ixgbe_mac_82598EB: 4863 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4864 break; 4865 4866 case ixgbe_mac_82599EB: 4867 case ixgbe_mac_X540: 4868 case ixgbe_mac_X550: 4869 case ixgbe_mac_X550EM_x: 4870 case ixgbe_mac_X550EM_a: 4871 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4872 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4873 break; 4874 4875 default: 4876 break; 4877 } 4878 ixgbe_intr_other_work(ixgbe, eicr); 4879 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4880 } 4881 4882 mutex_exit(&ixgbe->gen_lock); 4883 4884 result = DDI_INTR_CLAIMED; 4885 } else { 4886 mutex_exit(&ixgbe->gen_lock); 4887 4888 /* 4889 * No interrupt cause bits set: don't claim this interrupt. 4890 */ 4891 result = DDI_INTR_UNCLAIMED; 4892 } 4893 4894 /* re-enable the interrupts which were automasked */ 4895 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4896 4897 /* 4898 * Do the following work outside of the gen_lock 4899 */ 4900 if (mp != NULL) { 4901 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4902 rx_ring->ring_gen_num); 4903 } 4904 4905 if (tx_reschedule) { 4906 tx_ring->reschedule = B_FALSE; 4907 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle); 4908 tx_ring->stat_reschedule++; 4909 } 4910 4911 return (result); 4912 } 4913 4914 /* 4915 * ixgbe_intr_msi - Interrupt handler for MSI. 4916 */ 4917 static uint_t 4918 ixgbe_intr_msi(void *arg1, void *arg2) 4919 { 4920 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4921 struct ixgbe_hw *hw = &ixgbe->hw; 4922 uint32_t eicr; 4923 4924 _NOTE(ARGUNUSED(arg2)); 4925 4926 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4927 4928 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4929 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4930 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4931 return (DDI_INTR_CLAIMED); 4932 } 4933 4934 /* 4935 * For MSI interrupt, we have only one vector, 4936 * so we have only one rx ring and one tx ring enabled. 4937 */ 4938 ASSERT(ixgbe->num_rx_rings == 1); 4939 ASSERT(ixgbe->num_tx_rings == 1); 4940 4941 /* 4942 * For MSI interrupt, rx rings[0] will use RTxQ[0]. 4943 */ 4944 if (eicr & 0x1) { 4945 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 4946 } 4947 4948 /* 4949 * For MSI interrupt, tx rings[0] will use RTxQ[1]. 4950 */ 4951 if (eicr & 0x2) { 4952 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 4953 } 4954 4955 /* any interrupt type other than tx/rx */ 4956 if (eicr & ixgbe->capab->other_intr) { 4957 mutex_enter(&ixgbe->gen_lock); 4958 switch (hw->mac.type) { 4959 case ixgbe_mac_82598EB: 4960 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4961 break; 4962 4963 case ixgbe_mac_82599EB: 4964 case ixgbe_mac_X540: 4965 case ixgbe_mac_X550: 4966 case ixgbe_mac_X550EM_x: 4967 case ixgbe_mac_X550EM_a: 4968 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4969 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4970 break; 4971 4972 default: 4973 break; 4974 } 4975 ixgbe_intr_other_work(ixgbe, eicr); 4976 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4977 mutex_exit(&ixgbe->gen_lock); 4978 } 4979 4980 /* re-enable the interrupts which were automasked */ 4981 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4982 4983 return (DDI_INTR_CLAIMED); 4984 } 4985 4986 /* 4987 * ixgbe_intr_msix - Interrupt handler for MSI-X. 4988 */ 4989 static uint_t 4990 ixgbe_intr_msix(void *arg1, void *arg2) 4991 { 4992 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1; 4993 ixgbe_t *ixgbe = vect->ixgbe; 4994 struct ixgbe_hw *hw = &ixgbe->hw; 4995 uint32_t eicr; 4996 int r_idx = 0; 4997 4998 _NOTE(ARGUNUSED(arg2)); 4999 5000 /* 5001 * Clean each rx ring that has its bit set in the map 5002 */ 5003 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1)); 5004 while (r_idx >= 0) { 5005 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]); 5006 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 5007 (ixgbe->num_rx_rings - 1)); 5008 } 5009 5010 /* 5011 * Clean each tx ring that has its bit set in the map 5012 */ 5013 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1)); 5014 while (r_idx >= 0) { 5015 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]); 5016 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 5017 (ixgbe->num_tx_rings - 1)); 5018 } 5019 5020 5021 /* 5022 * Clean other interrupt (link change) that has its bit set in the map 5023 */ 5024 if (BT_TEST(vect->other_map, 0) == 1) { 5025 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 5026 5027 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != 5028 DDI_FM_OK) { 5029 ddi_fm_service_impact(ixgbe->dip, 5030 DDI_SERVICE_DEGRADED); 5031 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 5032 return (DDI_INTR_CLAIMED); 5033 } 5034 5035 /* 5036 * Check "other" cause bits: any interrupt type other than tx/rx 5037 */ 5038 if (eicr & ixgbe->capab->other_intr) { 5039 mutex_enter(&ixgbe->gen_lock); 5040 switch (hw->mac.type) { 5041 case ixgbe_mac_82598EB: 5042 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 5043 ixgbe_intr_other_work(ixgbe, eicr); 5044 break; 5045 5046 case ixgbe_mac_82599EB: 5047 case ixgbe_mac_X540: 5048 case ixgbe_mac_X550: 5049 case ixgbe_mac_X550EM_x: 5050 case ixgbe_mac_X550EM_a: 5051 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 5052 ixgbe_intr_other_work(ixgbe, eicr); 5053 break; 5054 5055 default: 5056 break; 5057 } 5058 mutex_exit(&ixgbe->gen_lock); 5059 } 5060 5061 /* re-enable the interrupts which were automasked */ 5062 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 5063 } 5064 5065 return (DDI_INTR_CLAIMED); 5066 } 5067 5068 /* 5069 * ixgbe_alloc_intrs - Allocate interrupts for the driver. 5070 * 5071 * Normal sequence is to try MSI-X; if not sucessful, try MSI; 5072 * if not successful, try Legacy. 5073 * ixgbe->intr_force can be used to force sequence to start with 5074 * any of the 3 types. 5075 * If MSI-X is not used, number of tx/rx rings is forced to 1. 5076 */ 5077 static int 5078 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 5079 { 5080 dev_info_t *devinfo; 5081 int intr_types; 5082 int rc; 5083 5084 devinfo = ixgbe->dip; 5085 5086 /* 5087 * Get supported interrupt types 5088 */ 5089 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 5090 5091 if (rc != DDI_SUCCESS) { 5092 ixgbe_log(ixgbe, 5093 "Get supported interrupt types failed: %d", rc); 5094 return (IXGBE_FAILURE); 5095 } 5096 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 5097 5098 ixgbe->intr_type = 0; 5099 5100 /* 5101 * Install MSI-X interrupts 5102 */ 5103 if ((intr_types & DDI_INTR_TYPE_MSIX) && 5104 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 5105 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 5106 if (rc == IXGBE_SUCCESS) 5107 return (IXGBE_SUCCESS); 5108 5109 ixgbe_log(ixgbe, 5110 "Allocate MSI-X failed, trying MSI interrupts..."); 5111 } 5112 5113 /* 5114 * MSI-X not used, force rings and groups to 1 5115 */ 5116 ixgbe->num_rx_rings = 1; 5117 ixgbe->num_rx_groups = 1; 5118 ixgbe->num_tx_rings = 1; 5119 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 5120 ixgbe_log(ixgbe, 5121 "MSI-X not used, force rings and groups number to 1"); 5122 5123 /* 5124 * Install MSI interrupts 5125 */ 5126 if ((intr_types & DDI_INTR_TYPE_MSI) && 5127 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 5128 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 5129 if (rc == IXGBE_SUCCESS) 5130 return (IXGBE_SUCCESS); 5131 5132 ixgbe_log(ixgbe, 5133 "Allocate MSI failed, trying Legacy interrupts..."); 5134 } 5135 5136 /* 5137 * Install legacy interrupts 5138 */ 5139 if (intr_types & DDI_INTR_TYPE_FIXED) { 5140 /* 5141 * Disallow legacy interrupts for X550. X550 has a silicon 5142 * bug which prevents Shared Legacy interrupts from working. 5143 * For details, please reference: 5144 * 5145 * Intel Ethernet Controller X550 Specification Update rev. 2.1 5146 * May 2016, erratum 22: PCIe Interrupt Status Bit 5147 */ 5148 if (ixgbe->hw.mac.type == ixgbe_mac_X550 || 5149 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x || 5150 ixgbe->hw.mac.type == ixgbe_mac_X550EM_a || 5151 ixgbe->hw.mac.type == ixgbe_mac_X550_vf || 5152 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf || 5153 ixgbe->hw.mac.type == ixgbe_mac_X550EM_a_vf) { 5154 ixgbe_log(ixgbe, 5155 "Legacy interrupts are not supported on this " 5156 "adapter. Please use MSI or MSI-X instead."); 5157 return (IXGBE_FAILURE); 5158 } 5159 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 5160 if (rc == IXGBE_SUCCESS) 5161 return (IXGBE_SUCCESS); 5162 5163 ixgbe_log(ixgbe, 5164 "Allocate Legacy interrupts failed"); 5165 } 5166 5167 /* 5168 * If none of the 3 types succeeded, return failure 5169 */ 5170 return (IXGBE_FAILURE); 5171 } 5172 5173 /* 5174 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 5175 * 5176 * For legacy and MSI, only 1 handle is needed. For MSI-X, 5177 * if fewer than 2 handles are available, return failure. 5178 * Upon success, this maps the vectors to rx and tx rings for 5179 * interrupts. 5180 */ 5181 static int 5182 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 5183 { 5184 dev_info_t *devinfo; 5185 int request, count, actual; 5186 int minimum; 5187 int rc; 5188 uint32_t ring_per_group; 5189 5190 devinfo = ixgbe->dip; 5191 5192 switch (intr_type) { 5193 case DDI_INTR_TYPE_FIXED: 5194 request = 1; /* Request 1 legacy interrupt handle */ 5195 minimum = 1; 5196 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 5197 break; 5198 5199 case DDI_INTR_TYPE_MSI: 5200 request = 1; /* Request 1 MSI interrupt handle */ 5201 minimum = 1; 5202 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 5203 break; 5204 5205 case DDI_INTR_TYPE_MSIX: 5206 /* 5207 * Best number of vectors for the adapter is 5208 * (# rx rings + # tx rings), however we will 5209 * limit the request number. 5210 */ 5211 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings); 5212 if (request > ixgbe->capab->max_ring_vect) 5213 request = ixgbe->capab->max_ring_vect; 5214 minimum = 1; 5215 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 5216 break; 5217 5218 default: 5219 ixgbe_log(ixgbe, 5220 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 5221 intr_type); 5222 return (IXGBE_FAILURE); 5223 } 5224 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 5225 request, minimum); 5226 5227 /* 5228 * Get number of supported interrupts 5229 */ 5230 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 5231 if ((rc != DDI_SUCCESS) || (count < minimum)) { 5232 ixgbe_log(ixgbe, 5233 "Get interrupt number failed. Return: %d, count: %d", 5234 rc, count); 5235 return (IXGBE_FAILURE); 5236 } 5237 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 5238 5239 actual = 0; 5240 ixgbe->intr_cnt = 0; 5241 ixgbe->intr_cnt_max = 0; 5242 ixgbe->intr_cnt_min = 0; 5243 5244 /* 5245 * Allocate an array of interrupt handles 5246 */ 5247 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 5248 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 5249 5250 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 5251 request, &actual, DDI_INTR_ALLOC_NORMAL); 5252 if (rc != DDI_SUCCESS) { 5253 ixgbe_log(ixgbe, "Allocate interrupts failed. " 5254 "return: %d, request: %d, actual: %d", 5255 rc, request, actual); 5256 goto alloc_handle_fail; 5257 } 5258 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 5259 5260 /* 5261 * upper/lower limit of interrupts 5262 */ 5263 ixgbe->intr_cnt = actual; 5264 ixgbe->intr_cnt_max = request; 5265 ixgbe->intr_cnt_min = minimum; 5266 5267 /* 5268 * rss number per group should not exceed the rx interrupt number, 5269 * else need to adjust rx ring number. 5270 */ 5271 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5272 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0); 5273 if (actual < ring_per_group) { 5274 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual; 5275 ixgbe_setup_vmdq_rss_conf(ixgbe); 5276 } 5277 5278 /* 5279 * Now we know the actual number of vectors. Here we map the vector 5280 * to other, rx rings and tx ring. 5281 */ 5282 if (actual < minimum) { 5283 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 5284 actual); 5285 goto alloc_handle_fail; 5286 } 5287 5288 /* 5289 * Get priority for first vector, assume remaining are all the same 5290 */ 5291 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 5292 if (rc != DDI_SUCCESS) { 5293 ixgbe_log(ixgbe, 5294 "Get interrupt priority failed: %d", rc); 5295 goto alloc_handle_fail; 5296 } 5297 5298 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 5299 if (rc != DDI_SUCCESS) { 5300 ixgbe_log(ixgbe, 5301 "Get interrupt cap failed: %d", rc); 5302 goto alloc_handle_fail; 5303 } 5304 5305 ixgbe->intr_type = intr_type; 5306 5307 return (IXGBE_SUCCESS); 5308 5309 alloc_handle_fail: 5310 ixgbe_rem_intrs(ixgbe); 5311 5312 return (IXGBE_FAILURE); 5313 } 5314 5315 /* 5316 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 5317 * 5318 * Before adding the interrupt handlers, the interrupt vectors have 5319 * been allocated, and the rx/tx rings have also been allocated. 5320 */ 5321 static int 5322 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 5323 { 5324 int vector = 0; 5325 int rc; 5326 5327 switch (ixgbe->intr_type) { 5328 case DDI_INTR_TYPE_MSIX: 5329 /* 5330 * Add interrupt handler for all vectors 5331 */ 5332 for (vector = 0; vector < ixgbe->intr_cnt; vector++) { 5333 /* 5334 * install pointer to vect_map[vector] 5335 */ 5336 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5337 (ddi_intr_handler_t *)ixgbe_intr_msix, 5338 (void *)&ixgbe->vect_map[vector], NULL); 5339 5340 if (rc != DDI_SUCCESS) { 5341 ixgbe_log(ixgbe, 5342 "Add interrupt handler failed. " 5343 "return: %d, vector: %d", rc, vector); 5344 for (vector--; vector >= 0; vector--) { 5345 (void) ddi_intr_remove_handler( 5346 ixgbe->htable[vector]); 5347 } 5348 return (IXGBE_FAILURE); 5349 } 5350 } 5351 5352 break; 5353 5354 case DDI_INTR_TYPE_MSI: 5355 /* 5356 * Add interrupt handlers for the only vector 5357 */ 5358 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5359 (ddi_intr_handler_t *)ixgbe_intr_msi, 5360 (void *)ixgbe, NULL); 5361 5362 if (rc != DDI_SUCCESS) { 5363 ixgbe_log(ixgbe, 5364 "Add MSI interrupt handler failed: %d", rc); 5365 return (IXGBE_FAILURE); 5366 } 5367 5368 break; 5369 5370 case DDI_INTR_TYPE_FIXED: 5371 /* 5372 * Add interrupt handlers for the only vector 5373 */ 5374 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5375 (ddi_intr_handler_t *)ixgbe_intr_legacy, 5376 (void *)ixgbe, NULL); 5377 5378 if (rc != DDI_SUCCESS) { 5379 ixgbe_log(ixgbe, 5380 "Add legacy interrupt handler failed: %d", rc); 5381 return (IXGBE_FAILURE); 5382 } 5383 5384 break; 5385 5386 default: 5387 return (IXGBE_FAILURE); 5388 } 5389 5390 return (IXGBE_SUCCESS); 5391 } 5392 5393 /* 5394 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 5395 */ 5396 static void 5397 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 5398 { 5399 /* 5400 * Set bit in map 5401 */ 5402 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 5403 5404 /* 5405 * Count bits set 5406 */ 5407 ixgbe->vect_map[v_idx].rxr_cnt++; 5408 5409 /* 5410 * Remember bit position 5411 */ 5412 ixgbe->rx_rings[r_idx].intr_vector = v_idx; 5413 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 5414 } 5415 5416 /* 5417 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 5418 */ 5419 static void 5420 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 5421 { 5422 /* 5423 * Set bit in map 5424 */ 5425 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 5426 5427 /* 5428 * Count bits set 5429 */ 5430 ixgbe->vect_map[v_idx].txr_cnt++; 5431 5432 /* 5433 * Remember bit position 5434 */ 5435 ixgbe->tx_rings[t_idx].intr_vector = v_idx; 5436 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 5437 } 5438 5439 /* 5440 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector 5441 * allocation register (IVAR). 5442 * cause: 5443 * -1 : other cause 5444 * 0 : rx 5445 * 1 : tx 5446 */ 5447 static void 5448 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector, 5449 int8_t cause) 5450 { 5451 struct ixgbe_hw *hw = &ixgbe->hw; 5452 u32 ivar, index; 5453 5454 switch (hw->mac.type) { 5455 case ixgbe_mac_82598EB: 5456 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5457 if (cause == -1) { 5458 cause = 0; 5459 } 5460 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5461 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5462 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3))); 5463 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3))); 5464 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5465 break; 5466 5467 case ixgbe_mac_82599EB: 5468 case ixgbe_mac_X540: 5469 case ixgbe_mac_X550: 5470 case ixgbe_mac_X550EM_x: 5471 case ixgbe_mac_X550EM_a: 5472 if (cause == -1) { 5473 /* other causes */ 5474 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5475 index = (intr_alloc_entry & 1) * 8; 5476 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5477 ivar &= ~(0xFF << index); 5478 ivar |= (msix_vector << index); 5479 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5480 } else { 5481 /* tx or rx causes */ 5482 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5483 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5484 ivar = IXGBE_READ_REG(hw, 5485 IXGBE_IVAR(intr_alloc_entry >> 1)); 5486 ivar &= ~(0xFF << index); 5487 ivar |= (msix_vector << index); 5488 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5489 ivar); 5490 } 5491 break; 5492 5493 default: 5494 break; 5495 } 5496 } 5497 5498 /* 5499 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of 5500 * given interrupt vector allocation register (IVAR). 5501 * cause: 5502 * -1 : other cause 5503 * 0 : rx 5504 * 1 : tx 5505 */ 5506 static void 5507 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5508 { 5509 struct ixgbe_hw *hw = &ixgbe->hw; 5510 u32 ivar, index; 5511 5512 switch (hw->mac.type) { 5513 case ixgbe_mac_82598EB: 5514 if (cause == -1) { 5515 cause = 0; 5516 } 5517 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5518 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5519 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 * 5520 (intr_alloc_entry & 0x3))); 5521 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5522 break; 5523 5524 case ixgbe_mac_82599EB: 5525 case ixgbe_mac_X540: 5526 case ixgbe_mac_X550: 5527 case ixgbe_mac_X550EM_x: 5528 case ixgbe_mac_X550EM_a: 5529 if (cause == -1) { 5530 /* other causes */ 5531 index = (intr_alloc_entry & 1) * 8; 5532 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5533 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5534 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5535 } else { 5536 /* tx or rx causes */ 5537 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5538 ivar = IXGBE_READ_REG(hw, 5539 IXGBE_IVAR(intr_alloc_entry >> 1)); 5540 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5541 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5542 ivar); 5543 } 5544 break; 5545 5546 default: 5547 break; 5548 } 5549 } 5550 5551 /* 5552 * ixgbe_disable_ivar - Disble the given entry by clearing the VAL bit of 5553 * given interrupt vector allocation register (IVAR). 5554 * cause: 5555 * -1 : other cause 5556 * 0 : rx 5557 * 1 : tx 5558 */ 5559 static void 5560 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5561 { 5562 struct ixgbe_hw *hw = &ixgbe->hw; 5563 u32 ivar, index; 5564 5565 switch (hw->mac.type) { 5566 case ixgbe_mac_82598EB: 5567 if (cause == -1) { 5568 cause = 0; 5569 } 5570 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5571 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5572 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 * 5573 (intr_alloc_entry & 0x3))); 5574 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5575 break; 5576 5577 case ixgbe_mac_82599EB: 5578 case ixgbe_mac_X540: 5579 case ixgbe_mac_X550: 5580 case ixgbe_mac_X550EM_x: 5581 case ixgbe_mac_X550EM_a: 5582 if (cause == -1) { 5583 /* other causes */ 5584 index = (intr_alloc_entry & 1) * 8; 5585 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5586 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5587 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5588 } else { 5589 /* tx or rx causes */ 5590 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5591 ivar = IXGBE_READ_REG(hw, 5592 IXGBE_IVAR(intr_alloc_entry >> 1)); 5593 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5594 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5595 ivar); 5596 } 5597 break; 5598 5599 default: 5600 break; 5601 } 5602 } 5603 5604 /* 5605 * Convert the rx ring index driver maintained to the rx ring index 5606 * in h/w. 5607 */ 5608 static uint32_t 5609 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index) 5610 { 5611 5612 struct ixgbe_hw *hw = &ixgbe->hw; 5613 uint32_t rx_ring_per_group, hw_rx_index; 5614 5615 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS || 5616 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) { 5617 return (sw_rx_index); 5618 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) { 5619 switch (hw->mac.type) { 5620 case ixgbe_mac_82598EB: 5621 return (sw_rx_index); 5622 5623 case ixgbe_mac_82599EB: 5624 case ixgbe_mac_X540: 5625 case ixgbe_mac_X550: 5626 case ixgbe_mac_X550EM_x: 5627 case ixgbe_mac_X550EM_a: 5628 return (sw_rx_index * 2); 5629 5630 default: 5631 break; 5632 } 5633 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) { 5634 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5635 5636 switch (hw->mac.type) { 5637 case ixgbe_mac_82598EB: 5638 hw_rx_index = (sw_rx_index / rx_ring_per_group) * 5639 16 + (sw_rx_index % rx_ring_per_group); 5640 return (hw_rx_index); 5641 5642 case ixgbe_mac_82599EB: 5643 case ixgbe_mac_X540: 5644 case ixgbe_mac_X550: 5645 case ixgbe_mac_X550EM_x: 5646 case ixgbe_mac_X550EM_a: 5647 if (ixgbe->num_rx_groups > 32) { 5648 hw_rx_index = (sw_rx_index / 5649 rx_ring_per_group) * 2 + 5650 (sw_rx_index % rx_ring_per_group); 5651 } else { 5652 hw_rx_index = (sw_rx_index / 5653 rx_ring_per_group) * 4 + 5654 (sw_rx_index % rx_ring_per_group); 5655 } 5656 return (hw_rx_index); 5657 5658 default: 5659 break; 5660 } 5661 } 5662 5663 /* 5664 * Should never reach. Just to make compiler happy. 5665 */ 5666 return (sw_rx_index); 5667 } 5668 5669 /* 5670 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors. 5671 * 5672 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt 5673 * to vector[0 - (intr_cnt -1)]. 5674 */ 5675 static int 5676 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe) 5677 { 5678 int i, vector = 0; 5679 5680 /* initialize vector map */ 5681 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 5682 for (i = 0; i < ixgbe->intr_cnt; i++) { 5683 ixgbe->vect_map[i].ixgbe = ixgbe; 5684 } 5685 5686 /* 5687 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0], 5688 * tx rings[0] on RTxQ[1]. 5689 */ 5690 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5691 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 5692 ixgbe_map_txring_to_vector(ixgbe, 0, 1); 5693 return (IXGBE_SUCCESS); 5694 } 5695 5696 /* 5697 * Interrupts/vectors mapping for MSI-X 5698 */ 5699 5700 /* 5701 * Map other interrupt to vector 0, 5702 * Set bit in map and count the bits set. 5703 */ 5704 BT_SET(ixgbe->vect_map[vector].other_map, 0); 5705 ixgbe->vect_map[vector].other_cnt++; 5706 5707 /* 5708 * Map rx ring interrupts to vectors 5709 */ 5710 for (i = 0; i < ixgbe->num_rx_rings; i++) { 5711 ixgbe_map_rxring_to_vector(ixgbe, i, vector); 5712 vector = (vector +1) % ixgbe->intr_cnt; 5713 } 5714 5715 /* 5716 * Map tx ring interrupts to vectors 5717 */ 5718 for (i = 0; i < ixgbe->num_tx_rings; i++) { 5719 ixgbe_map_txring_to_vector(ixgbe, i, vector); 5720 vector = (vector +1) % ixgbe->intr_cnt; 5721 } 5722 5723 return (IXGBE_SUCCESS); 5724 } 5725 5726 /* 5727 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s). 5728 * 5729 * This relies on ring/vector mapping already set up in the 5730 * vect_map[] structures 5731 */ 5732 static void 5733 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 5734 { 5735 struct ixgbe_hw *hw = &ixgbe->hw; 5736 ixgbe_intr_vector_t *vect; /* vector bitmap */ 5737 int r_idx; /* ring index */ 5738 int v_idx; /* vector index */ 5739 uint32_t hw_index; 5740 5741 /* 5742 * Clear any previous entries 5743 */ 5744 switch (hw->mac.type) { 5745 case ixgbe_mac_82598EB: 5746 for (v_idx = 0; v_idx < 25; v_idx++) 5747 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5748 break; 5749 5750 case ixgbe_mac_82599EB: 5751 case ixgbe_mac_X540: 5752 case ixgbe_mac_X550: 5753 case ixgbe_mac_X550EM_x: 5754 case ixgbe_mac_X550EM_a: 5755 for (v_idx = 0; v_idx < 64; v_idx++) 5756 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5757 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0); 5758 break; 5759 5760 default: 5761 break; 5762 } 5763 5764 /* 5765 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and 5766 * tx rings[0] will use RTxQ[1]. 5767 */ 5768 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5769 ixgbe_setup_ivar(ixgbe, 0, 0, 0); 5770 ixgbe_setup_ivar(ixgbe, 0, 1, 1); 5771 return; 5772 } 5773 5774 /* 5775 * For MSI-X interrupt, "Other" is always on vector[0]. 5776 */ 5777 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1); 5778 5779 /* 5780 * For each interrupt vector, populate the IVAR table 5781 */ 5782 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 5783 vect = &ixgbe->vect_map[v_idx]; 5784 5785 /* 5786 * For each rx ring bit set 5787 */ 5788 r_idx = bt_getlowbit(vect->rx_map, 0, 5789 (ixgbe->num_rx_rings - 1)); 5790 5791 while (r_idx >= 0) { 5792 hw_index = ixgbe->rx_rings[r_idx].hw_index; 5793 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0); 5794 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 5795 (ixgbe->num_rx_rings - 1)); 5796 } 5797 5798 /* 5799 * For each tx ring bit set 5800 */ 5801 r_idx = bt_getlowbit(vect->tx_map, 0, 5802 (ixgbe->num_tx_rings - 1)); 5803 5804 while (r_idx >= 0) { 5805 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1); 5806 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 5807 (ixgbe->num_tx_rings - 1)); 5808 } 5809 } 5810 } 5811 5812 /* 5813 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 5814 */ 5815 static void 5816 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 5817 { 5818 int i; 5819 int rc; 5820 5821 for (i = 0; i < ixgbe->intr_cnt; i++) { 5822 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 5823 if (rc != DDI_SUCCESS) { 5824 IXGBE_DEBUGLOG_1(ixgbe, 5825 "Remove intr handler failed: %d", rc); 5826 } 5827 } 5828 } 5829 5830 /* 5831 * ixgbe_rem_intrs - Remove the allocated interrupts. 5832 */ 5833 static void 5834 ixgbe_rem_intrs(ixgbe_t *ixgbe) 5835 { 5836 int i; 5837 int rc; 5838 5839 for (i = 0; i < ixgbe->intr_cnt; i++) { 5840 rc = ddi_intr_free(ixgbe->htable[i]); 5841 if (rc != DDI_SUCCESS) { 5842 IXGBE_DEBUGLOG_1(ixgbe, 5843 "Free intr failed: %d", rc); 5844 } 5845 } 5846 5847 kmem_free(ixgbe->htable, ixgbe->intr_size); 5848 ixgbe->htable = NULL; 5849 } 5850 5851 /* 5852 * ixgbe_enable_intrs - Enable all the ddi interrupts. 5853 */ 5854 static int 5855 ixgbe_enable_intrs(ixgbe_t *ixgbe) 5856 { 5857 int i; 5858 int rc; 5859 5860 /* 5861 * Enable interrupts 5862 */ 5863 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5864 /* 5865 * Call ddi_intr_block_enable() for MSI 5866 */ 5867 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 5868 if (rc != DDI_SUCCESS) { 5869 ixgbe_log(ixgbe, 5870 "Enable block intr failed: %d", rc); 5871 return (IXGBE_FAILURE); 5872 } 5873 } else { 5874 /* 5875 * Call ddi_intr_enable() for Legacy/MSI non block enable 5876 */ 5877 for (i = 0; i < ixgbe->intr_cnt; i++) { 5878 rc = ddi_intr_enable(ixgbe->htable[i]); 5879 if (rc != DDI_SUCCESS) { 5880 ixgbe_log(ixgbe, 5881 "Enable intr failed: %d", rc); 5882 return (IXGBE_FAILURE); 5883 } 5884 } 5885 } 5886 5887 return (IXGBE_SUCCESS); 5888 } 5889 5890 /* 5891 * ixgbe_disable_intrs - Disable all the interrupts. 5892 */ 5893 static int 5894 ixgbe_disable_intrs(ixgbe_t *ixgbe) 5895 { 5896 int i; 5897 int rc; 5898 5899 /* 5900 * Disable all interrupts 5901 */ 5902 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5903 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 5904 if (rc != DDI_SUCCESS) { 5905 ixgbe_log(ixgbe, 5906 "Disable block intr failed: %d", rc); 5907 return (IXGBE_FAILURE); 5908 } 5909 } else { 5910 for (i = 0; i < ixgbe->intr_cnt; i++) { 5911 rc = ddi_intr_disable(ixgbe->htable[i]); 5912 if (rc != DDI_SUCCESS) { 5913 ixgbe_log(ixgbe, 5914 "Disable intr failed: %d", rc); 5915 return (IXGBE_FAILURE); 5916 } 5917 } 5918 } 5919 5920 return (IXGBE_SUCCESS); 5921 } 5922 5923 /* 5924 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 5925 */ 5926 static void 5927 ixgbe_get_hw_state(ixgbe_t *ixgbe) 5928 { 5929 struct ixgbe_hw *hw = &ixgbe->hw; 5930 ixgbe_link_speed speed = 0; 5931 bool link_up = false; 5932 uint32_t pcs1g_anlp = 0; 5933 5934 ASSERT(mutex_owned(&ixgbe->gen_lock)); 5935 ixgbe->param_lp_1000fdx_cap = 0; 5936 ixgbe->param_lp_100fdx_cap = 0; 5937 5938 /* check for link, don't wait */ 5939 (void) ixgbe_check_link(hw, &speed, &link_up, false); 5940 ixgbe->phys_supported = ixgbe_get_supported_physical_layer(hw); 5941 5942 /* 5943 * Update the observed Link Partner's capabilities. Not all adapters 5944 * can provide full information on the LP's capable speeds, so we 5945 * provide what we can. 5946 */ 5947 if (link_up) { 5948 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 5949 5950 ixgbe->param_lp_1000fdx_cap = 5951 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5952 ixgbe->param_lp_100fdx_cap = 5953 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5954 } 5955 5956 /* 5957 * Update GLD's notion of the adapter's currently advertised speeds. 5958 * Since the common code doesn't always record the current autonegotiate 5959 * settings in the phy struct for all parts (specifically, adapters with 5960 * SFPs) we first test to see if it is 0, and if so, we fall back to 5961 * using the adapter's speed capabilities which we saved during instance 5962 * init in ixgbe_init_params(). 5963 * 5964 * Adapters with SFPs will always be shown as advertising all of their 5965 * supported speeds, and adapters with baseT PHYs (where the phy struct 5966 * is maintained by the common code) will always have a factual view of 5967 * their currently-advertised speeds. In the case of SFPs, this is 5968 * acceptable as we default to advertising all speeds that the adapter 5969 * claims to support, and those properties are immutable; unlike on 5970 * baseT (copper) PHYs, where speeds can be enabled or disabled at will. 5971 */ 5972 speed = hw->phy.autoneg_advertised; 5973 if (speed == 0) 5974 speed = ixgbe->speeds_supported; 5975 5976 ixgbe->param_adv_10000fdx_cap = 5977 (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0; 5978 ixgbe->param_adv_5000fdx_cap = 5979 (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0; 5980 ixgbe->param_adv_2500fdx_cap = 5981 (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0; 5982 ixgbe->param_adv_1000fdx_cap = 5983 (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0; 5984 ixgbe->param_adv_100fdx_cap = 5985 (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0; 5986 } 5987 5988 /* 5989 * ixgbe_get_driver_control - Notify that driver is in control of device. 5990 */ 5991 static void 5992 ixgbe_get_driver_control(struct ixgbe_hw *hw) 5993 { 5994 uint32_t ctrl_ext; 5995 5996 /* 5997 * Notify firmware that driver is in control of device 5998 */ 5999 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 6000 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 6001 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 6002 } 6003 6004 /* 6005 * ixgbe_release_driver_control - Notify that driver is no longer in control 6006 * of device. 6007 */ 6008 static void 6009 ixgbe_release_driver_control(struct ixgbe_hw *hw) 6010 { 6011 uint32_t ctrl_ext; 6012 6013 /* 6014 * Notify firmware that driver is no longer in control of device 6015 */ 6016 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 6017 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 6018 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 6019 } 6020 6021 /* 6022 * ixgbe_atomic_reserve - Atomic decrease operation. 6023 */ 6024 int 6025 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 6026 { 6027 uint32_t oldval; 6028 uint32_t newval; 6029 6030 /* 6031 * ATOMICALLY 6032 */ 6033 do { 6034 oldval = *count_p; 6035 if (oldval < n) 6036 return (-1); 6037 newval = oldval - n; 6038 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 6039 6040 return (newval); 6041 } 6042 6043 /* 6044 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 6045 */ 6046 static uint8_t * 6047 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 6048 { 6049 uint8_t *addr = *upd_ptr; 6050 uint8_t *new_ptr; 6051 6052 _NOTE(ARGUNUSED(hw)); 6053 _NOTE(ARGUNUSED(vmdq)); 6054 6055 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 6056 *upd_ptr = new_ptr; 6057 return (addr); 6058 } 6059 6060 /* 6061 * FMA support 6062 */ 6063 int 6064 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 6065 { 6066 ddi_fm_error_t de; 6067 6068 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 6069 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 6070 return (de.fme_status); 6071 } 6072 6073 int 6074 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 6075 { 6076 ddi_fm_error_t de; 6077 6078 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 6079 return (de.fme_status); 6080 } 6081 6082 /* 6083 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 6084 */ 6085 static int 6086 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6087 { 6088 _NOTE(ARGUNUSED(impl_data)); 6089 /* 6090 * as the driver can always deal with an error in any dma or 6091 * access handle, we can just return the fme_status value. 6092 */ 6093 pci_ereport_post(dip, err, NULL); 6094 return (err->fme_status); 6095 } 6096 6097 static void 6098 ixgbe_fm_init(ixgbe_t *ixgbe) 6099 { 6100 ddi_iblock_cookie_t iblk; 6101 int fma_dma_flag; 6102 6103 /* 6104 * Only register with IO Fault Services if we have some capability 6105 */ 6106 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 6107 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6108 } else { 6109 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 6110 } 6111 6112 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 6113 fma_dma_flag = 1; 6114 } else { 6115 fma_dma_flag = 0; 6116 } 6117 6118 ixgbe_set_fma_flags(fma_dma_flag); 6119 6120 if (ixgbe->fm_capabilities) { 6121 6122 /* 6123 * Register capabilities with IO Fault Services 6124 */ 6125 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 6126 6127 /* 6128 * Initialize pci ereport capabilities if ereport capable 6129 */ 6130 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 6131 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 6132 pci_ereport_setup(ixgbe->dip); 6133 6134 /* 6135 * Register error callback if error callback capable 6136 */ 6137 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 6138 ddi_fm_handler_register(ixgbe->dip, 6139 ixgbe_fm_error_cb, (void*) ixgbe); 6140 } 6141 } 6142 6143 static void 6144 ixgbe_fm_fini(ixgbe_t *ixgbe) 6145 { 6146 /* 6147 * Only unregister FMA capabilities if they are registered 6148 */ 6149 if (ixgbe->fm_capabilities) { 6150 6151 /* 6152 * Release any resources allocated by pci_ereport_setup() 6153 */ 6154 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 6155 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 6156 pci_ereport_teardown(ixgbe->dip); 6157 6158 /* 6159 * Un-register error callback if error callback capable 6160 */ 6161 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 6162 ddi_fm_handler_unregister(ixgbe->dip); 6163 6164 /* 6165 * Unregister from IO Fault Service 6166 */ 6167 ddi_fm_fini(ixgbe->dip); 6168 } 6169 } 6170 6171 void 6172 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 6173 { 6174 uint64_t ena; 6175 char buf[FM_MAX_CLASS]; 6176 6177 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6178 ena = fm_ena_generate(0, FM_ENA_FMT1); 6179 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 6180 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 6181 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 6182 } 6183 } 6184 6185 static int 6186 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 6187 { 6188 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh; 6189 6190 mutex_enter(&rx_ring->rx_lock); 6191 rx_ring->ring_gen_num = mr_gen_num; 6192 mutex_exit(&rx_ring->rx_lock); 6193 return (0); 6194 } 6195 6196 /* 6197 * Get the global ring index by a ring index within a group. 6198 */ 6199 static int 6200 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex) 6201 { 6202 ixgbe_rx_ring_t *rx_ring; 6203 int i; 6204 6205 for (i = 0; i < ixgbe->num_rx_rings; i++) { 6206 rx_ring = &ixgbe->rx_rings[i]; 6207 if (rx_ring->group_index == gindex) 6208 rindex--; 6209 if (rindex < 0) 6210 return (i); 6211 } 6212 6213 return (-1); 6214 } 6215 6216 /* 6217 * Callback funtion for MAC layer to register all rings. 6218 */ 6219 /* ARGSUSED */ 6220 void 6221 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index, 6222 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 6223 { 6224 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6225 mac_intr_t *mintr = &infop->mri_intr; 6226 6227 switch (rtype) { 6228 case MAC_RING_TYPE_RX: { 6229 /* 6230 * 'index' is the ring index within the group. 6231 * Need to get the global ring index by searching in groups. 6232 */ 6233 int global_ring_index = ixgbe_get_rx_ring_index( 6234 ixgbe, group_index, ring_index); 6235 6236 ASSERT(global_ring_index >= 0); 6237 6238 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index]; 6239 rx_ring->ring_handle = rh; 6240 6241 infop->mri_driver = (mac_ring_driver_t)rx_ring; 6242 infop->mri_start = ixgbe_ring_start; 6243 infop->mri_stop = NULL; 6244 infop->mri_poll = ixgbe_ring_rx_poll; 6245 infop->mri_stat = ixgbe_rx_ring_stat; 6246 6247 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 6248 mintr->mi_enable = ixgbe_rx_ring_intr_enable; 6249 mintr->mi_disable = ixgbe_rx_ring_intr_disable; 6250 if (ixgbe->intr_type & 6251 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6252 mintr->mi_ddi_handle = 6253 ixgbe->htable[rx_ring->intr_vector]; 6254 } 6255 6256 break; 6257 } 6258 case MAC_RING_TYPE_TX: { 6259 ASSERT(group_index == -1); 6260 ASSERT(ring_index < ixgbe->num_tx_rings); 6261 6262 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index]; 6263 tx_ring->ring_handle = rh; 6264 6265 infop->mri_driver = (mac_ring_driver_t)tx_ring; 6266 infop->mri_start = NULL; 6267 infop->mri_stop = NULL; 6268 infop->mri_tx = ixgbe_ring_tx; 6269 infop->mri_stat = ixgbe_tx_ring_stat; 6270 if (ixgbe->intr_type & 6271 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6272 mintr->mi_ddi_handle = 6273 ixgbe->htable[tx_ring->intr_vector]; 6274 } 6275 break; 6276 } 6277 default: 6278 break; 6279 } 6280 } 6281 6282 /* 6283 * Callback funtion for MAC layer to register all groups. 6284 */ 6285 void 6286 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index, 6287 mac_group_info_t *infop, mac_group_handle_t gh) 6288 { 6289 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6290 struct ixgbe_hw *hw = &ixgbe->hw; 6291 6292 switch (rtype) { 6293 case MAC_RING_TYPE_RX: { 6294 ixgbe_rx_group_t *rx_group; 6295 6296 rx_group = &ixgbe->rx_groups[index]; 6297 rx_group->group_handle = gh; 6298 6299 infop->mgi_driver = (mac_group_driver_t)rx_group; 6300 infop->mgi_start = NULL; 6301 infop->mgi_stop = NULL; 6302 infop->mgi_addmac = ixgbe_addmac; 6303 infop->mgi_remmac = ixgbe_remmac; 6304 6305 if ((ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ || 6306 ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) && 6307 (hw->mac.type == ixgbe_mac_82599EB || 6308 hw->mac.type == ixgbe_mac_X540 || 6309 hw->mac.type == ixgbe_mac_X550 || 6310 hw->mac.type == ixgbe_mac_X550EM_x)) { 6311 infop->mgi_addvlan = ixgbe_addvlan; 6312 infop->mgi_remvlan = ixgbe_remvlan; 6313 } else { 6314 infop->mgi_addvlan = NULL; 6315 infop->mgi_remvlan = NULL; 6316 } 6317 6318 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups); 6319 6320 break; 6321 } 6322 case MAC_RING_TYPE_TX: 6323 break; 6324 default: 6325 break; 6326 } 6327 } 6328 6329 /* 6330 * Enable interrupt on the specificed rx ring. 6331 */ 6332 int 6333 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh) 6334 { 6335 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 6336 ixgbe_t *ixgbe = rx_ring->ixgbe; 6337 int r_idx = rx_ring->index; 6338 int hw_r_idx = rx_ring->hw_index; 6339 int v_idx = rx_ring->intr_vector; 6340 6341 mutex_enter(&ixgbe->gen_lock); 6342 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 6343 mutex_exit(&ixgbe->gen_lock); 6344 /* 6345 * Simply return 0. 6346 * Interrupts are being adjusted. ixgbe_intr_adjust() 6347 * will eventually re-enable the interrupt when it's 6348 * done with the adjustment. 6349 */ 6350 return (0); 6351 } 6352 6353 /* 6354 * To enable interrupt by setting the VAL bit of given interrupt 6355 * vector allocation register (IVAR). 6356 */ 6357 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0); 6358 6359 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 6360 6361 /* 6362 * Trigger a Rx interrupt on this ring 6363 */ 6364 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx)); 6365 IXGBE_WRITE_FLUSH(&ixgbe->hw); 6366 6367 mutex_exit(&ixgbe->gen_lock); 6368 6369 return (0); 6370 } 6371 6372 /* 6373 * Disable interrupt on the specificed rx ring. 6374 */ 6375 int 6376 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh) 6377 { 6378 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 6379 ixgbe_t *ixgbe = rx_ring->ixgbe; 6380 int r_idx = rx_ring->index; 6381 int hw_r_idx = rx_ring->hw_index; 6382 int v_idx = rx_ring->intr_vector; 6383 6384 mutex_enter(&ixgbe->gen_lock); 6385 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 6386 mutex_exit(&ixgbe->gen_lock); 6387 /* 6388 * Simply return 0. 6389 * In the rare case where an interrupt is being 6390 * disabled while interrupts are being adjusted, 6391 * we don't fail the operation. No interrupts will 6392 * be generated while they are adjusted, and 6393 * ixgbe_intr_adjust() will cause the interrupts 6394 * to be re-enabled once it completes. Note that 6395 * in this case, packets may be delivered to the 6396 * stack via interrupts before xgbe_rx_ring_intr_enable() 6397 * is called again. This is acceptable since interrupt 6398 * adjustment is infrequent, and the stack will be 6399 * able to handle these packets. 6400 */ 6401 return (0); 6402 } 6403 6404 /* 6405 * To disable interrupt by clearing the VAL bit of given interrupt 6406 * vector allocation register (IVAR). 6407 */ 6408 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0); 6409 6410 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx); 6411 6412 mutex_exit(&ixgbe->gen_lock); 6413 6414 return (0); 6415 } 6416 6417 static ixgbe_vlan_t * 6418 ixgbe_find_vlan(ixgbe_rx_group_t *rx_group, uint16_t vid) 6419 { 6420 for (ixgbe_vlan_t *vlp = list_head(&rx_group->vlans); vlp != NULL; 6421 vlp = list_next(&rx_group->vlans, vlp)) { 6422 if (vlp->ixvl_vid == vid) 6423 return (vlp); 6424 } 6425 6426 return (NULL); 6427 } 6428 6429 /* 6430 * Attempt to use a VLAN HW filter for this group. If the group is 6431 * interested in untagged packets then set AUPE only. If the group is 6432 * the default then only set the VFTA. Leave the VLVF slots open for 6433 * reserved groups to guarantee their use of HW filtering. 6434 */ 6435 static int 6436 ixgbe_addvlan(mac_group_driver_t gdriver, uint16_t vid) 6437 { 6438 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)gdriver; 6439 ixgbe_t *ixgbe = rx_group->ixgbe; 6440 struct ixgbe_hw *hw = &ixgbe->hw; 6441 ixgbe_vlan_t *vlp; 6442 int ret; 6443 boolean_t is_def_grp; 6444 6445 mutex_enter(&ixgbe->gen_lock); 6446 6447 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6448 mutex_exit(&ixgbe->gen_lock); 6449 return (ECANCELED); 6450 } 6451 6452 /* 6453 * Let's be sure VLAN filtering is enabled. 6454 */ 6455 VERIFY3B(ixgbe->vlft_enabled, ==, B_TRUE); 6456 is_def_grp = (rx_group->index == ixgbe->rx_def_group); 6457 6458 /* 6459 * VLAN filtering is enabled but we want to receive untagged 6460 * traffic on this group -- set the AUPE bit on the group and 6461 * leave the VLAN tables alone. 6462 */ 6463 if (vid == MAC_VLAN_UNTAGGED) { 6464 /* 6465 * We never enable AUPE on the default group; it is 6466 * redundant. Untagged traffic which passes L2 6467 * filtering is delivered to the default group if no 6468 * other group is interested. 6469 */ 6470 if (!is_def_grp) { 6471 uint32_t vml2flt; 6472 6473 vml2flt = IXGBE_READ_REG(hw, 6474 IXGBE_VMOLR(rx_group->index)); 6475 vml2flt |= IXGBE_VMOLR_AUPE; 6476 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rx_group->index), 6477 vml2flt); 6478 rx_group->aupe = B_TRUE; 6479 } 6480 6481 mutex_exit(&ixgbe->gen_lock); 6482 return (0); 6483 } 6484 6485 vlp = ixgbe_find_vlan(rx_group, vid); 6486 if (vlp != NULL) { 6487 /* Only the default group supports multiple clients. */ 6488 VERIFY3B(is_def_grp, ==, B_TRUE); 6489 vlp->ixvl_refs++; 6490 mutex_exit(&ixgbe->gen_lock); 6491 return (0); 6492 } 6493 6494 /* 6495 * The default group doesn't require a VLVF entry, only a VFTA 6496 * entry. All traffic passing L2 filtering (MPSAR + VFTA) is 6497 * delivered to the default group if no other group is 6498 * interested. The fourth argument, vlvf_bypass, tells the 6499 * ixgbe common code to avoid using a VLVF slot if one isn't 6500 * already allocated to this VLAN. 6501 * 6502 * This logic is meant to reserve VLVF slots for use by 6503 * reserved groups: guaranteeing their use of HW filtering. 6504 */ 6505 ret = ixgbe_set_vfta(hw, vid, rx_group->index, true, is_def_grp); 6506 6507 if (ret == IXGBE_SUCCESS) { 6508 vlp = kmem_zalloc(sizeof (ixgbe_vlan_t), KM_SLEEP); 6509 vlp->ixvl_vid = vid; 6510 vlp->ixvl_refs = 1; 6511 list_insert_tail(&rx_group->vlans, vlp); 6512 mutex_exit(&ixgbe->gen_lock); 6513 return (0); 6514 } 6515 6516 /* 6517 * We should actually never return ENOSPC because we've set 6518 * things up so that every reserved group is guaranteed to 6519 * have a VLVF slot. 6520 */ 6521 if (ret == IXGBE_ERR_PARAM) 6522 ret = EINVAL; 6523 else if (ret == IXGBE_ERR_NO_SPACE) 6524 ret = ENOSPC; 6525 else 6526 ret = EIO; 6527 6528 mutex_exit(&ixgbe->gen_lock); 6529 return (ret); 6530 } 6531 6532 /* 6533 * Attempt to remove the VLAN HW filter associated with this group. If 6534 * we are removing a HW filter for the default group then we know only 6535 * the VFTA was set (VLVF is reserved for non-default/reserved 6536 * groups). If the group wishes to stop receiving untagged traffic 6537 * then clear the AUPE but leave the VLAN filters alone. 6538 */ 6539 static int 6540 ixgbe_remvlan(mac_group_driver_t gdriver, uint16_t vid) 6541 { 6542 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)gdriver; 6543 ixgbe_t *ixgbe = rx_group->ixgbe; 6544 struct ixgbe_hw *hw = &ixgbe->hw; 6545 int ret; 6546 ixgbe_vlan_t *vlp; 6547 boolean_t is_def_grp; 6548 6549 mutex_enter(&ixgbe->gen_lock); 6550 6551 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6552 mutex_exit(&ixgbe->gen_lock); 6553 return (ECANCELED); 6554 } 6555 6556 is_def_grp = (rx_group->index == ixgbe->rx_def_group); 6557 6558 /* See the AUPE comment in ixgbe_addvlan(). */ 6559 if (vid == MAC_VLAN_UNTAGGED) { 6560 if (!is_def_grp) { 6561 uint32_t vml2flt; 6562 6563 vml2flt = IXGBE_READ_REG(hw, 6564 IXGBE_VMOLR(rx_group->index)); 6565 vml2flt &= ~IXGBE_VMOLR_AUPE; 6566 IXGBE_WRITE_REG(hw, 6567 IXGBE_VMOLR(rx_group->index), vml2flt); 6568 rx_group->aupe = B_FALSE; 6569 } 6570 mutex_exit(&ixgbe->gen_lock); 6571 return (0); 6572 } 6573 6574 vlp = ixgbe_find_vlan(rx_group, vid); 6575 if (vlp == NULL) { 6576 mutex_exit(&ixgbe->gen_lock); 6577 return (ENOENT); 6578 } 6579 6580 /* 6581 * See the comment in ixgbe_addvlan() about is_def_grp and 6582 * vlvf_bypass. 6583 */ 6584 if (vlp->ixvl_refs == 1) { 6585 ret = ixgbe_set_vfta(hw, vid, rx_group->index, false, 6586 is_def_grp); 6587 } else { 6588 /* 6589 * Only the default group can have multiple clients. 6590 * If there is more than one client, leave the 6591 * VFTA[vid] bit alone. 6592 */ 6593 VERIFY3B(is_def_grp, ==, B_TRUE); 6594 VERIFY3U(vlp->ixvl_refs, >, 1); 6595 vlp->ixvl_refs--; 6596 mutex_exit(&ixgbe->gen_lock); 6597 return (0); 6598 } 6599 6600 if (ret != IXGBE_SUCCESS) { 6601 mutex_exit(&ixgbe->gen_lock); 6602 /* IXGBE_ERR_PARAM should be the only possible error here. */ 6603 if (ret == IXGBE_ERR_PARAM) 6604 return (EINVAL); 6605 else 6606 return (EIO); 6607 } 6608 6609 VERIFY3U(vlp->ixvl_refs, ==, 1); 6610 vlp->ixvl_refs = 0; 6611 list_remove(&rx_group->vlans, vlp); 6612 kmem_free(vlp, sizeof (ixgbe_vlan_t)); 6613 6614 /* 6615 * Calling ixgbe_set_vfta() on a non-default group may have 6616 * cleared the VFTA[vid] bit even though the default group 6617 * still has clients using the vid. This happens because the 6618 * ixgbe common code doesn't ref count the use of VLANs. Check 6619 * for any use of vid on the default group and make sure the 6620 * VFTA[vid] bit is set. This operation is idempotent: setting 6621 * VFTA[vid] to true if already true won't hurt anything. 6622 */ 6623 if (!is_def_grp) { 6624 ixgbe_rx_group_t *defgrp; 6625 6626 defgrp = &ixgbe->rx_groups[ixgbe->rx_def_group]; 6627 vlp = ixgbe_find_vlan(defgrp, vid); 6628 if (vlp != NULL) { 6629 /* This shouldn't fail, but if it does return EIO. */ 6630 ret = ixgbe_set_vfta(hw, vid, rx_group->index, true, 6631 true); 6632 if (ret != IXGBE_SUCCESS) { 6633 mutex_exit(&ixgbe->gen_lock); 6634 return (EIO); 6635 } 6636 } 6637 } 6638 6639 mutex_exit(&ixgbe->gen_lock); 6640 return (0); 6641 } 6642 6643 /* 6644 * Add a mac address. 6645 */ 6646 static int 6647 ixgbe_addmac(void *arg, const uint8_t *mac_addr) 6648 { 6649 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 6650 ixgbe_t *ixgbe = rx_group->ixgbe; 6651 struct ixgbe_hw *hw = &ixgbe->hw; 6652 int slot, i; 6653 6654 mutex_enter(&ixgbe->gen_lock); 6655 6656 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6657 mutex_exit(&ixgbe->gen_lock); 6658 return (ECANCELED); 6659 } 6660 6661 if (ixgbe->unicst_avail == 0) { 6662 /* no slots available */ 6663 mutex_exit(&ixgbe->gen_lock); 6664 return (ENOSPC); 6665 } 6666 6667 /* 6668 * The first ixgbe->num_rx_groups slots are reserved for each respective 6669 * group. The rest slots are shared by all groups. While adding a 6670 * MAC address, reserved slots are firstly checked then the shared 6671 * slots are searched. 6672 */ 6673 slot = -1; 6674 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) { 6675 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) { 6676 if (ixgbe->unicst_addr[i].mac.set == 0) { 6677 slot = i; 6678 break; 6679 } 6680 } 6681 } else { 6682 slot = rx_group->index; 6683 } 6684 6685 if (slot == -1) { 6686 /* no slots available */ 6687 mutex_exit(&ixgbe->gen_lock); 6688 return (ENOSPC); 6689 } 6690 6691 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 6692 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr, 6693 rx_group->index, IXGBE_RAH_AV); 6694 ixgbe->unicst_addr[slot].mac.set = 1; 6695 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index; 6696 ixgbe->unicst_avail--; 6697 6698 mutex_exit(&ixgbe->gen_lock); 6699 6700 return (0); 6701 } 6702 6703 /* 6704 * Remove a mac address. 6705 */ 6706 static int 6707 ixgbe_remmac(void *arg, const uint8_t *mac_addr) 6708 { 6709 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 6710 ixgbe_t *ixgbe = rx_group->ixgbe; 6711 struct ixgbe_hw *hw = &ixgbe->hw; 6712 int slot; 6713 6714 mutex_enter(&ixgbe->gen_lock); 6715 6716 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 6717 mutex_exit(&ixgbe->gen_lock); 6718 return (ECANCELED); 6719 } 6720 6721 slot = ixgbe_unicst_find(ixgbe, mac_addr); 6722 if (slot == -1) { 6723 mutex_exit(&ixgbe->gen_lock); 6724 return (EINVAL); 6725 } 6726 6727 if (ixgbe->unicst_addr[slot].mac.set == 0) { 6728 mutex_exit(&ixgbe->gen_lock); 6729 return (EINVAL); 6730 } 6731 6732 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 6733 (void) ixgbe_clear_rar(hw, slot); 6734 ixgbe->unicst_addr[slot].mac.set = 0; 6735 ixgbe->unicst_avail++; 6736 6737 mutex_exit(&ixgbe->gen_lock); 6738 6739 return (0); 6740 } 6741 6742 static int 6743 ixgbe_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 6744 ddi_ufm_image_t *imgp) 6745 { 6746 ixgbe_t *ixgbe = arg; 6747 const char *type; 6748 6749 if (imgno != 0) { 6750 return (EINVAL); 6751 } 6752 6753 ddi_ufm_image_set_desc(imgp, "NVM"); 6754 ddi_ufm_image_set_nslots(imgp, 1); 6755 switch (ixgbe->hw.eeprom.type) { 6756 case ixgbe_eeprom_spi: 6757 type = "SPI EEPROM"; 6758 break; 6759 case ixgbe_flash: 6760 type = "Flash"; 6761 break; 6762 default: 6763 type = NULL; 6764 break; 6765 } 6766 6767 if (type != NULL) { 6768 nvlist_t *nvl; 6769 6770 nvl = fnvlist_alloc(); 6771 fnvlist_add_string(nvl, "image-type", type); 6772 /* 6773 * The DDI takes ownership of the nvlist_t at this point. 6774 */ 6775 ddi_ufm_image_set_misc(imgp, nvl); 6776 } 6777 6778 return (0); 6779 } 6780 6781 static int 6782 ixgbe_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 6783 uint_t slotno, ddi_ufm_slot_t *slotp) 6784 { 6785 ixgbe_t *ixgbe = arg; 6786 6787 if (imgno != 0 || slotno != 0) { 6788 return (EINVAL); 6789 } 6790 6791 /* 6792 * Unfortunately there is no generic versioning in the ixgbe family 6793 * eeprom parts. 6794 */ 6795 ddi_ufm_slot_set_version(slotp, "unknown"); 6796 ddi_ufm_slot_set_attrs(slotp, DDI_UFM_ATTR_ACTIVE | 6797 DDI_UFM_ATTR_READABLE | DDI_UFM_ATTR_WRITEABLE); 6798 ddi_ufm_slot_set_imgsize(slotp, ixgbe->hw.eeprom.word_size * 2); 6799 6800 return (0); 6801 } 6802 6803 static int 6804 ixgbe_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps) 6805 { 6806 ixgbe_t *ixgbe = arg; 6807 6808 *caps = 0; 6809 switch (ixgbe->hw.eeprom.type) { 6810 case ixgbe_eeprom_spi: 6811 case ixgbe_flash: 6812 *caps |= DDI_UFM_CAP_REPORT; 6813 if (ixgbe->hw.eeprom.ops.read_buffer != NULL) { 6814 *caps |= DDI_UFM_CAP_READIMG; 6815 } 6816 break; 6817 default: 6818 break; 6819 } 6820 6821 return (0); 6822 } 6823 6824 static int 6825 ixgbe_ufm_readimg(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 6826 uint_t slotno, uint64_t len, uint64_t offset, void *buf, uint64_t *nread) 6827 { 6828 int ret; 6829 uint16_t wordoff, nwords, *buf16 = buf; 6830 ixgbe_t *ixgbe = arg; 6831 uint32_t imgsize = ixgbe->hw.eeprom.word_size * 2; 6832 6833 if (imgno != 0 || slotno != 0) { 6834 return (EINVAL); 6835 } 6836 6837 if (len > imgsize || offset > imgsize || len + offset > imgsize) { 6838 return (EINVAL); 6839 } 6840 6841 if (ixgbe->hw.eeprom.ops.read_buffer == NULL) { 6842 return (ENOTSUP); 6843 } 6844 6845 /* 6846 * Hardware provides us a means to read 16-bit words. For the time 6847 * being, restrict offset and length to be 2 byte aligned. We should 6848 * probably reduce this restriction. We could probably just use a bounce 6849 * buffer. 6850 */ 6851 if ((offset % 2) != 0 || (len % 2) != 0) { 6852 return (EINVAL); 6853 } 6854 6855 wordoff = offset >> 1; 6856 nwords = len >> 1; 6857 mutex_enter(&ixgbe->gen_lock); 6858 ret = ixgbe_read_eeprom_buffer(&ixgbe->hw, wordoff, nwords, buf16); 6859 mutex_exit(&ixgbe->gen_lock); 6860 6861 if (ret == 0) { 6862 uint16_t i; 6863 *nread = len; 6864 for (i = 0; i < nwords; i++) { 6865 buf16[i] = LE_16(buf16[i]); 6866 } 6867 } else { 6868 ret = EIO; 6869 } 6870 6871 return (ret); 6872 } 6873