/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
 */

#include "ixgbe_sw.h"

static char ixgbe_ident[] = "Intel 10Gb Ethernet";
static char ixgbe_version[] = "ixgbe 1.1.7";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static int ixgbe_alloc_rx_data(ixgbe_t *);
static void ixgbe_free_rx_data(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_setup_vmdq(ixgbe_t *);
static void ixgbe_setup_vmdq_rss(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
static void ixgbe_get_conf(ixgbe_t *);
static void ixgbe_init_params(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static void ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_sfp_check(void *);
static void ixgbe_overtemp_check(void *);
static void ixgbe_link_timer(void *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_msix(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static int ixgbe_addmac(void *, const uint8_t *);
static int ixgbe_remmac(void *, const uint8_t *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
static int ixgbe_intr_cb_register(ixgbe_t *);
static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

char *ixgbe_priv_props[] = {
	"_tx_copy_thresh",
	"_tx_recycle_thresh",
	"_tx_overload_thresh",
	"_tx_resched_thresh",
	"_rx_copy_thresh",
	"_rx_limit_per_intr",
	"_intr_throttling",
	"_adv_pause_cap",
	"_adv_asym_pause_cap",
	NULL
};

#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
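
/*
 * The leading "_" marks these as driver-private link properties; they
 * are normally inspected and set with dladm show-linkprop/set-linkprop
 * rather than through a public MAC property.
 */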

static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ixgbe_ident,		/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
	normal,	"normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};

#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop,
	ixgbe_m_propinfo
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	64,		/* default number of rx queues */
	16,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2),	/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE)	/* capability flags */
};

static adapter_info_t ixgbe_X540_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2),	/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE)	/* capability flags */
};

/*
 * Module Initialization Functions.
 */

int
_init(void)
{
	int status;

	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

	status = mod_install(&ixgbe_modlinkage);

	if (status != DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&ixgbe_modlinkage);

	if (status == DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	status = mod_info(&ixgbe_modlinkage, modinfop);

	return (status);
}
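
/*
 * A note on the pattern used throughout ixgbe_attach() below: each
 * setup step that completes sets a bit in ixgbe->attach_progress, and
 * ixgbe_unconfigure() undoes exactly the steps whose bits are set.
 * A minimal sketch (step_a, undo_a and ATTACH_PROGRESS_A are
 * illustrative names only):
 *
 *	if (step_a(ixgbe) != IXGBE_SUCCESS)
 *		goto attach_fail;
 *	ixgbe->attach_progress |= ATTACH_PROGRESS_A;
 *	...
 * and in the failure/teardown path:
 *	if (ixgbe->attach_progress & ATTACH_PROGRESS_A)
 *		undo_a(ixgbe);
 */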

/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	ixgbe_t *ixgbe;
	struct ixgbe_osdep *osdep;
	struct ixgbe_hw *hw;
	int instance;
	char taskqname[32];

	/*
	 * Check the command and perform corresponding operations
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (ixgbe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/* Get the device instance */
	instance = ddi_get_instance(devinfo);

	/* Allocate memory for the instance data structure */
	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

	ixgbe->dip = devinfo;
	ixgbe->instance = instance;

	hw = &ixgbe->hw;
	osdep = &ixgbe->osdep;
	hw->back = osdep;
	osdep->ixgbe = ixgbe;

	/* Attach the instance pointer to the dev_info data structure */
	ddi_set_driver_private(devinfo, ixgbe);

	/*
	 * Initialize for fma support
	 */
	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	ixgbe_fm_init(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

	/*
	 * Map PCI config space registers
	 */
	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map PCI configurations");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

	/*
	 * Identify the chipset family
	 */
	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to identify hardware");
		goto attach_fail;
	}

	/*
	 * Map device registers
	 */
	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map device registers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

	/*
	 * Initialize driver parameters
	 */
	ixgbe_init_properties(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

	/*
	 * Register interrupt callback
	 */
	if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register interrupt callback");
		goto attach_fail;
	}

	/*
	 * Allocate interrupts
	 */
	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Allocate rx/tx rings based on the ring numbers.
	 * The actual numbers of rx/tx rings are decided by the number of
	 * allocated interrupt vectors, so we should allocate the rings after
	 * interrupts are allocated.
	 */
	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
		goto attach_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Create a taskq for sfp-change
	 */
	(void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
	if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "sfp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;

	/*
	 * Create a taskq for over-temp
	 */
	(void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
	if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		ixgbe_error(ixgbe, "overtemp_taskq create failed");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;

	/*
	 * Initialize driver parameters
	 */
	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize driver settings");
		goto attach_fail;
	}

	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handler and registering
	 * the softint, to avoid the condition where the interrupt handler
	 * can try using an uninitialized mutex.
	 */
	ixgbe_init_locks(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

	/*
	 * Initialize chipset hardware
	 */
	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize adapter");
		goto attach_fail;
	}
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	/*
	 * Initialize statistics
	 */
	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize statistics");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

	/*
	 * Register the driver to the MAC
	 */
	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register MAC");
		goto attach_fail;
	}
	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

	ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
	    IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
	if (ixgbe->periodic_id == 0) {
		ixgbe_error(ixgbe, "Failed to add the link check timer");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

	ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);

	return (DDI_SUCCESS);

attach_fail:
	ixgbe_unconfigure(devinfo, ixgbe);
	return (DDI_FAILURE);
}

/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_stop(ixgbe, B_TRUE);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	}
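
	/*
	 * Even after the MAC is stopped, rx buffers loaned to the upper
	 * layer may still be outstanding; ixgbe_rx_drain() below polls
	 * rcb_pending for up to RX_DRAIN_TIME milliseconds, and the
	 * detach fails rather than waiting forever if the buffers are
	 * not returned.
	 */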

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * remove the link check timer
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
		if (ixgbe->periodic_id != NULL) {
			ddi_periodic_delete(ixgbe->periodic_id);
			ixgbe->periodic_id = NULL;
		}
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for sfp-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
		ddi_taskq_destroy(ixgbe->sfp_taskq);
	}

	/*
	 * Remove taskq for over-temp
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
		ddi_taskq_destroy(ixgbe->overtemp_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Unregister interrupt callback handler
	 */
	(void) ddi_cb_unregister(ixgbe->cb_hdl);

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
818 */ 819 static int 820 ixgbe_register_mac(ixgbe_t *ixgbe) 821 { 822 struct ixgbe_hw *hw = &ixgbe->hw; 823 mac_register_t *mac; 824 int status; 825 826 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 827 return (IXGBE_FAILURE); 828 829 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 830 mac->m_driver = ixgbe; 831 mac->m_dip = ixgbe->dip; 832 mac->m_src_addr = hw->mac.addr; 833 mac->m_callbacks = &ixgbe_m_callbacks; 834 mac->m_min_sdu = 0; 835 mac->m_max_sdu = ixgbe->default_mtu; 836 mac->m_margin = VLAN_TAGSZ; 837 mac->m_priv_props = ixgbe_priv_props; 838 mac->m_v12n = MAC_VIRT_LEVEL1; 839 840 status = mac_register(mac, &ixgbe->mac_hdl); 841 842 mac_free(mac); 843 844 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE); 845 } 846 847 /* 848 * ixgbe_identify_hardware - Identify the type of the chipset. 849 */ 850 static int 851 ixgbe_identify_hardware(ixgbe_t *ixgbe) 852 { 853 struct ixgbe_hw *hw = &ixgbe->hw; 854 struct ixgbe_osdep *osdep = &ixgbe->osdep; 855 856 /* 857 * Get the device id 858 */ 859 hw->vendor_id = 860 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 861 hw->device_id = 862 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 863 hw->revision_id = 864 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 865 hw->subsystem_device_id = 866 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 867 hw->subsystem_vendor_id = 868 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 869 870 /* 871 * Set the mac type of the adapter based on the device id 872 */ 873 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) { 874 return (IXGBE_FAILURE); 875 } 876 877 /* 878 * Install adapter capabilities 879 */ 880 switch (hw->mac.type) { 881 case ixgbe_mac_82598EB: 882 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n"); 883 ixgbe->capab = &ixgbe_82598eb_cap; 884 885 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) { 886 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 887 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1; 888 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN; 889 } 890 break; 891 892 case ixgbe_mac_82599EB: 893 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n"); 894 ixgbe->capab = &ixgbe_82599eb_cap; 895 896 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) { 897 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE; 898 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0; 899 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN; 900 } 901 break; 902 903 case ixgbe_mac_X540: 904 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n"); 905 ixgbe->capab = &ixgbe_X540_cap; 906 /* 907 * For now, X540 is all set in its capab structure. 908 * As other X540 variants show up, things can change here. 909 */ 910 break; 911 912 default: 913 IXGBE_DEBUGLOG_1(ixgbe, 914 "adapter not supported in ixgbe_identify_hardware(): %d\n", 915 hw->mac.type); 916 return (IXGBE_FAILURE); 917 } 918 919 return (IXGBE_SUCCESS); 920 } 921 922 /* 923 * ixgbe_regs_map - Map the device registers. 924 * 925 */ 926 static int 927 ixgbe_regs_map(ixgbe_t *ixgbe) 928 { 929 dev_info_t *devinfo = ixgbe->dip; 930 struct ixgbe_hw *hw = &ixgbe->hw; 931 struct ixgbe_osdep *osdep = &ixgbe->osdep; 932 off_t mem_size; 933 934 /* 935 * First get the size of device registers to be mapped. 
	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
	    != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Call ddi_regs_map_setup() to map registers
	 */
	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0,
	    mem_size, &ixgbe_regs_acc_attr,
	    &osdep->reg_handle)) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings,
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);

	ixgbe_init_params(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	dev_info_t *devinfo = ixgbe->dip;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_group_t *rx_group;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t rx_size;
	uint32_t tx_size;
	uint32_t ring_per_group;
	int i;

	/*
	 * Initialize chipset specific hardware function pointers
	 */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Get the system page size
	 */
	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

	/*
	 * Set rx buffer size
	 *
	 * The IP header alignment room is counted in the calculation.
	 * The rx buffer size is in units of 1K, as required by the
	 * chipset hardware.
	 */
	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
	ixgbe->rx_buf_size = ((rx_size >> 10) +
	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Set tx buffer size
	 */
	tx_size = ixgbe->max_frame_size;
	ixgbe->tx_buf_size = ((tx_size >> 10) +
	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
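
	/*
	 * Worked example of the 1K rounding above, assuming the default
	 * 1500-byte MTU (max_frame_size of 1518) and an IPHDR_ALIGN_ROOM
	 * of 2: rx_size = 1518 + 2 = 1520; 1520 >> 10 = 1 with a non-zero
	 * remainder, so rx_buf_size = (1 + 1) << 10 = 2048 bytes.
	 */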

	/*
	 * Initialize rx/tx rings/groups parameters
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_ring->index = i;
		rx_ring->ixgbe = ixgbe;
		rx_ring->group_index = i / ring_per_group;
		rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
	}

	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		rx_group = &ixgbe->rx_groups[i];
		rx_group->index = i;
		rx_group->ixgbe = ixgbe;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		tx_ring->index = i;
		tx_ring->ixgbe = ixgbe;
		if (ixgbe->tx_head_wb_enable)
			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
		else
			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

		tx_ring->ring_size = ixgbe->tx_ring_size;
		tx_ring->free_list_size = ixgbe->tx_ring_size +
		    (ixgbe->tx_ring_size >> 1);
	}

	/*
	 * Initialize values of interrupt throttling rate
	 */
	for (i = 1; i < MAX_INTR_VECTOR; i++)
		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

	/*
	 * The initial link state should be "unknown"
	 */
	ixgbe->link_state = LINK_STATE_UNKNOWN;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_init(&rx_ring->rx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_init(&tx_ring->tx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->recycle_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_head_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_tail_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	mutex_init(&ixgbe->gen_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

	mutex_init(&ixgbe->watchdog_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}
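
/*
 * All of the mutexes above are initialized with DDI_INTR_PRI(intr_pri)
 * because they may be acquired in interrupt context; mutex_init(9F)
 * requires that a mutex taken by an interrupt handler be initialized
 * with that handler's priority.
 */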

/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_destroy(&rx_ring->rx_lock);
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_destroy(&tx_ring->tx_lock);
		mutex_destroy(&tx_ring->recycle_lock);
		mutex_destroy(&tx_ring->tcb_head_lock);
		mutex_destroy(&tx_ring->tcb_tail_lock);
	}

	mutex_destroy(&ixgbe->gen_lock);
	mutex_destroy(&ixgbe->watchdog_lock);
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	int i;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_SUCCESS);
	}
	ixgbe_stop(ixgbe, B_FALSE);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	return (DDI_SUCCESS);
}

/*
 * ixgbe_init - Initialize the device.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	mutex_enter(&ixgbe->gen_lock);

	/*
	 * Reset chipset to put the hardware in a known state
	 * before we try to do anything with the eeprom.
	 */
	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * Need to init eeprom before validating the checksum.
	 */
	if (ixgbe_init_eeprom_params(hw) < 0) {
		ixgbe_error(ixgbe,
		    "Unable to initialize the eeprom interface.");
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * NVM validation
	 */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state.  Call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
			ixgbe_error(ixgbe,
			    "Invalid NVM checksum. Please contact "
			    "the vendor to update the NVM.");
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}
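
	/*
	 * A sketch of what the watermarks below mean: when the rx FIFO
	 * fill level rises past the high water mark, the MAC sends an
	 * XOFF pause frame advertising pause_time quanta; once the FIFO
	 * drains below the low water mark, an XON is sent (send_xon).
	 */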
	/*
	 * Setup default flow control thresholds - enable/disable
	 * & flow control type is controlled by ixgbe.conf
	 */
	hw->fc.high_water[0] = DEFAULT_FCRTH;
	hw->fc.low_water[0] = DEFAULT_FCRTL;
	hw->fc.pause_time = DEFAULT_FCPAUSE;
	hw->fc.send_xon = B_TRUE;

	/*
	 * Initialize link settings
	 */
	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

	/*
	 * Initialize the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto init_fail;
	}

	mutex_exit(&ixgbe->gen_lock);
	return (IXGBE_SUCCESS);

init_fail:
	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	mutex_exit(&ixgbe->gen_lock);
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	return (IXGBE_FAILURE);
}
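
/*
 * ixgbe_init() above is used for the initial bring-up in ixgbe_attach();
 * ixgbe_chip_start() below is shared with ixgbe_start(), which re-runs
 * it on every reset and resume.
 */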

/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int ret_val, i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Configure/Initialize hardware
	 */
	ret_val = ixgbe_init_hw(hw);
	if (ret_val != IXGBE_SUCCESS) {
		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
			ixgbe_error(ixgbe,
			    "This 82599 device is pre-release and contains"
			    " outdated firmware, please contact your hardware"
			    " vendor for a replacement.");
		} else {
			ixgbe_error(ixgbe, "Failed to initialize hardware");
			return (IXGBE_FAILURE);
		}
	}

	/*
	 * Re-enable relaxed ordering for performance.  It is disabled
	 * by default in the hardware init.
	 */
	if (ixgbe->relax_order_enable == B_TRUE)
		ixgbe_enable_relaxed_ordering(hw);

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
	}

	/*
	 * Save the state of the phy
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);
}

/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	int i;

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

	ixgbe_stop(ixgbe, B_FALSE);

	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		mutex_exit(&ixgbe->gen_lock);
		return (IXGBE_FAILURE);
	}

	/*
	 * After resetting, need to recheck the link status.
	 */
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

	if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	ixgbe_enable_watchdog_timer(ixgbe);

	return (IXGBE_SUCCESS);
}
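
/*
 * The two tx teardown helpers below split the work: ixgbe_tx_clean()
 * forcibly reclaims the tx control blocks left in the work lists once
 * the chip has been stopped, while ixgbe_tx_drain() merely waits up to
 * TX_DRAIN_TIME milliseconds (1 ms per iteration) for in-flight
 * packets to complete on their own.
 */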

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	int i, j;

	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chances to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * Reset the writeback head if it's enabled.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	boolean_t done;
	int i, j;

	/*
	 * Wait for a specific time to allow pending tx packets
	 * to be transmitted.
	 *
	 * Check the counter tbd_free to see if transmission is done.
	 * No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending packets have been transmitted;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < TX_DRAIN_TIME; i++) {

		done = B_TRUE;
		for (j = 0; j < ixgbe->num_tx_rings; j++) {
			tx_ring = &ixgbe->tx_rings[j];
			done = done &&
			    (tx_ring->tbd_free == tx_ring->ring_size);
		}

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
	boolean_t done = B_TRUE;
	int i;

	/*
	 * Polling the rx free list to check if those rx buffers held by
	 * the upper layer are released.
	 *
	 * Check the counter rcb_free to see if all pending buffers are
	 * released. No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending buffers have been released;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < RX_DRAIN_TIME; i++) {
		done = (ixgbe->rcb_pending == 0);

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * ixgbe_start() will be called when resetting, however if reset
	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
	 * before enabling the interrupts.
	 */
	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
	    | IXGBE_STALL | IXGBE_OVERTEMP));

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
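
/*
 * Lock ordering note: ixgbe_start() above and ixgbe_stop() below both
 * acquire every rx ring lock and then every tx ring lock in ascending
 * index order, and release them in the reverse order, so the two paths
 * cannot deadlock against each other.
 */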

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}

/*
 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
 */
/* ARGSUSED */
static int
ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
    void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	int count;

	switch (cbaction) {
	/* IRM callback */
	case DDI_CB_INTR_ADD:
	case DDI_CB_INTR_REMOVE:
		count = (int)(uintptr_t)cbarg;
		ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
		DTRACE_PROBE2(ixgbe__irm__callback, int, count,
		    int, ixgbe->intr_cnt);
		if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
		    DDI_SUCCESS) {
			ixgbe_error(ixgbe,
			    "IRM CB: Failed to adjust interrupts");
			goto cb_fail;
		}
		break;
	default:
		IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
		    cbaction);
		return (DDI_ENOTSUP);
	}
	return (DDI_SUCCESS);
cb_fail:
	return (DDI_FAILURE);
}
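
/*
 * Example of the IRM flow handled above: if the interrupt resource
 * manager rebalances MSI-X vectors and grants this instance two more,
 * the framework calls ixgbe_cbfunc() with DDI_CB_INTR_ADD and a cbarg
 * of 2; ixgbe_intr_adjust() below then quiesces the device,
 * re-allocates the vectors, re-maps the rings and restarts it.
 */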

/*
 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
 */
static int
ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
{
	int i, rc, actual;

	if (count == 0)
		return (DDI_SUCCESS);

	if ((cbaction == DDI_CB_INTR_ADD &&
	    ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
	    (cbaction == DDI_CB_INTR_REMOVE &&
	    ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
		return (DDI_FAILURE);

	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		return (DDI_FAILURE);
	}

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);

	mutex_enter(&ixgbe->gen_lock);
	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
	ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);

	ixgbe_stop(ixgbe, B_FALSE);
	/*
	 * Disable interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		rc = ixgbe_disable_intrs(ixgbe);
		ASSERT(rc == IXGBE_SUCCESS);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Clear vect_map
	 */
	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
	switch (cbaction) {
	case DDI_CB_INTR_ADD:
		rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
		    DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
		    DDI_INTR_ALLOC_NORMAL);
		if (rc != DDI_SUCCESS || actual != count) {
			ixgbe_log(ixgbe, "Adjust interrupts failed. "
			    "return: %d, irm cb size: %d, actual: %d",
			    rc, count, actual);
			goto intr_adjust_fail;
		}
		ixgbe->intr_cnt += count;
		break;

	case DDI_CB_INTR_REMOVE:
		for (i = ixgbe->intr_cnt - count;
		    i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_free(ixgbe->htable[i]);
			ixgbe->htable[i] = NULL;
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe, "Adjust interrupts failed. "
				    "return: %d, irm cb size: %d, actual: %d",
				    rc, count, actual);
				goto intr_adjust_fail;
			}
		}
		ixgbe->intr_cnt -= count;
		break;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto intr_adjust_fail;
	}
	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
		goto intr_adjust_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe,
		    "IRM CB: Failed to map interrupts to vectors");
		goto intr_adjust_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
		goto intr_adjust_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
		goto intr_adjust_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "IRM CB: Failed to start");
		goto intr_adjust_fail;
	}
	ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
	ixgbe->ixgbe_state |= IXGBE_STARTED;
	mutex_exit(&ixgbe->gen_lock);

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
		    ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
	}
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
		    ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
	}

	/* Wakeup all Tx rings */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		mac_tx_ring_update(ixgbe->mac_hdl,
		    ixgbe->tx_rings[i].ring_handle);
	}

	IXGBE_DEBUGLOG_3(ixgbe,
	    "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
	    ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
	return (DDI_SUCCESS);

intr_adjust_fail:
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	mutex_exit(&ixgbe->gen_lock);
	return (DDI_FAILURE);
}

/*
 * ixgbe_intr_cb_register - Register interrupt callback function.
 */
static int
ixgbe_intr_cb_register(ixgbe_t *ixgbe)
{
	if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
	    ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
 */
static int
ixgbe_alloc_rings(ixgbe_t *ixgbe)
{
	/*
	 * Allocate memory space for rx rings
	 */
	ixgbe->rx_rings = kmem_zalloc(
	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
	    KM_NOSLEEP);

	if (ixgbe->rx_rings == NULL) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory space for tx rings
	 */
	ixgbe->tx_rings = kmem_zalloc(
	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
	    KM_NOSLEEP);

	if (ixgbe->tx_rings == NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		ixgbe->rx_rings = NULL;
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory space for rx ring groups
	 */
	ixgbe->rx_groups = kmem_zalloc(
	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
	    KM_NOSLEEP);

	if (ixgbe->rx_groups == NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		kmem_free(ixgbe->tx_rings,
		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
		ixgbe->rx_rings = NULL;
		ixgbe->tx_rings = NULL;
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rings - Free the memory space of rx/tx rings.
 */
static void
ixgbe_free_rings(ixgbe_t *ixgbe)
{
	if (ixgbe->rx_rings != NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		ixgbe->rx_rings = NULL;
	}

	if (ixgbe->tx_rings != NULL) {
		kmem_free(ixgbe->tx_rings,
		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
		ixgbe->tx_rings = NULL;
	}

	if (ixgbe->rx_groups != NULL) {
		kmem_free(ixgbe->rx_groups,
		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
		ixgbe->rx_groups = NULL;
	}
}

static int
ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
			goto alloc_rx_rings_failure;
	}
	return (IXGBE_SUCCESS);

alloc_rx_rings_failure:
	ixgbe_free_rx_data(ixgbe);
	return (IXGBE_FAILURE);
}

static void
ixgbe_free_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];

		mutex_enter(&ixgbe->rx_pending_lock);
		rx_data = rx_ring->rx_data;

		if (rx_data != NULL) {
			rx_data->flag |= IXGBE_RX_STOPPED;

			if (rx_data->rcb_pending == 0) {
				ixgbe_free_rx_ring_data(rx_data);
				rx_ring->rx_data = NULL;
			}
		}

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}
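
/*
 * If rx control blocks are still loaned out when ixgbe_free_rx_data()
 * runs, the rx_data structure is only flagged IXGBE_RX_STOPPED here;
 * the final free is presumed to happen on the rx recycle path once the
 * last pending rcb is returned.
 */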
Initialize software pointers/parameters for receive/transmit; 2071 */ 2072 ixgbe_setup_rx(ixgbe); 2073 2074 ixgbe_setup_tx(ixgbe); 2075 } 2076 2077 static void 2078 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 2079 { 2080 ixgbe_t *ixgbe = rx_ring->ixgbe; 2081 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2082 struct ixgbe_hw *hw = &ixgbe->hw; 2083 rx_control_block_t *rcb; 2084 union ixgbe_adv_rx_desc *rbd; 2085 uint32_t size; 2086 uint32_t buf_low; 2087 uint32_t buf_high; 2088 uint32_t reg_val; 2089 int i; 2090 2091 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2092 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2093 2094 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2095 rcb = rx_data->work_list[i]; 2096 rbd = &rx_data->rbd_ring[i]; 2097 2098 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2099 rbd->read.hdr_addr = NULL; 2100 } 2101 2102 /* 2103 * Initialize the length register 2104 */ 2105 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2106 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2107 2108 /* 2109 * Initialize the base address registers 2110 */ 2111 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2112 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2113 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2114 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2115 2116 /* 2117 * Setup head & tail pointers 2118 */ 2119 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2120 rx_data->ring_size - 1); 2121 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2122 2123 rx_data->rbd_next = 0; 2124 rx_data->lro_first = 0; 2125 2126 /* 2127 * Setup the Receive Descriptor Control Register (RXDCTL) 2128 * PTHRESH=32 descriptors (half the internal cache) 2129 * HTHRESH=0 descriptors (to minimize latency on fetch) 2130 * WTHRESH defaults to 1 (writeback each descriptor) 2131 */ 2132 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2133 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2134 2135 /* Not a valid value for 82599 or X540 */ 2136 if (hw->mac.type == ixgbe_mac_82598EB) { 2137 reg_val |= 0x0020; /* pthresh */ 2138 } 2139 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2140 2141 if (hw->mac.type == ixgbe_mac_82599EB || 2142 hw->mac.type == ixgbe_mac_X540) { 2143 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2144 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2145 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2146 } 2147 2148 /* 2149 * Setup the Split and Replication Receive Control Register. 2150 * Set the rx buffer size and the advanced descriptor type. 
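 *
 * The buffer size is programmed in 1KB units via BSIZEPKT; for
 * example, a 2KB rx buffer yields (2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT)
 * == 2. DROP_EN lets the hardware drop packets when the ring is full
 * rather than backing up the receive path.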
2151 */ 2152 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) | 2153 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2154 reg_val |= IXGBE_SRRCTL_DROP_EN; 2155 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val); 2156 } 2157 2158 static void 2159 ixgbe_setup_rx(ixgbe_t *ixgbe) 2160 { 2161 ixgbe_rx_ring_t *rx_ring; 2162 struct ixgbe_hw *hw = &ixgbe->hw; 2163 uint32_t reg_val; 2164 uint32_t ring_mapping; 2165 uint32_t i, index; 2166 uint32_t psrtype_rss_bit; 2167 2168 /* PSRTYPE must be configured for 82599 */ 2169 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ && 2170 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) { 2171 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2172 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2173 reg_val |= IXGBE_PSRTYPE_L2HDR; 2174 reg_val |= 0x80000000; 2175 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val); 2176 } else { 2177 if (ixgbe->num_rx_groups > 32) { 2178 psrtype_rss_bit = 0x20000000; 2179 } else { 2180 psrtype_rss_bit = 0x40000000; 2181 } 2182 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) { 2183 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2184 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2185 reg_val |= IXGBE_PSRTYPE_L2HDR; 2186 reg_val |= psrtype_rss_bit; 2187 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val); 2188 } 2189 } 2190 2191 /* 2192 * Set filter control in FCTRL to accept broadcast packets and do 2193 * not pass pause frames to host. Flow control settings are already 2194 * in this register, so preserve them. 2195 */ 2196 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2197 reg_val |= IXGBE_FCTRL_BAM; /* broadcast accept mode */ 2198 reg_val |= IXGBE_FCTRL_DPF; /* discard pause frames */ 2199 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val); 2200 2201 /* 2202 * Hardware checksum settings 2203 */ 2204 if (ixgbe->rx_hcksum_enable) { 2205 reg_val = IXGBE_RXCSUM_IPPCSE; /* IP checksum */ 2206 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val); 2207 } 2208 2209 /* 2210 * Setup VMDq and RSS for multiple receive queues 2211 */ 2212 switch (ixgbe->classify_mode) { 2213 case IXGBE_CLASSIFY_RSS: 2214 /* 2215 * One group, only RSS is needed when more than 2216 * one ring enabled. 2217 */ 2218 ixgbe_setup_rss(ixgbe); 2219 break; 2220 2221 case IXGBE_CLASSIFY_VMDQ: 2222 /* 2223 * Multiple groups, each group has one ring, 2224 * only VMDq is needed. 2225 */ 2226 ixgbe_setup_vmdq(ixgbe); 2227 break; 2228 2229 case IXGBE_CLASSIFY_VMDQ_RSS: 2230 /* 2231 * Multiple groups and multiple rings, both 2232 * VMDq and RSS are needed. 2233 */ 2234 ixgbe_setup_vmdq_rss(ixgbe); 2235 break; 2236 2237 default: 2238 break; 2239 } 2240 2241 /* 2242 * Enable the receive unit. This must be done after filter 2243 * control is set in FCTRL. 2244 */ 2245 reg_val = (IXGBE_RXCTRL_RXEN /* Enable Receive Unit */ 2246 | IXGBE_RXCTRL_DMBYPS); /* descriptor monitor bypass */ 2247 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); 2248 2249 /* 2250 * ixgbe_setup_rx_ring must be called after configuring RXCTRL 2251 */ 2252 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2253 rx_ring = &ixgbe->rx_rings[i]; 2254 ixgbe_setup_rx_ring(rx_ring); 2255 } 2256 2257 /* 2258 * Setup the per-ring statistics mapping. 
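 *
 * Each RQSMR register packs four queue-to-statistics mappings, one
 * byte per queue: (index >> 2) selects the register and (index & 0x3)
 * selects the byte lane. For example, hardware queue 5 is mapped
 * through byte 1 of RQSMR(1).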
2259 */
2260 ring_mapping = 0;
2261 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2262 index = ixgbe->rx_rings[i].hw_index;
2263 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2264 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2265 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2266 }
2267
2268 /*
2269 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2270 * by four bytes if the packet has a VLAN field, so the value set
2271 * here covers the MTU, ethernet header and frame check sequence.
2272 * The register is named MAXFRS on the 82599.
2273 */
2274 reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
2275 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2276 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2277
2278 /*
2279 * Setup Jumbo Frame enable bit
2280 */
2281 if (ixgbe->default_mtu > ETHERMTU) {
2282 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2283 reg_val |= IXGBE_HLREG0_JUMBOEN;
2284 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2285 }
2286
2287 /*
2288 * Setup RSC for multiple receive queues.
2289 */
2290 if (ixgbe->lro_enable) {
2291 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2292 /*
2293 * Make sure rx_buf_size * MAXDESC is not greater
2294 * than 65535.
2295 * Intel recommends 4 as the MAXDESC field value.
2296 */
2297 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2298 reg_val |= IXGBE_RSCCTL_RSCEN;
2299 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2300 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2301 else
2302 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2303 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
2304 }
2305
2306 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2307 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2308 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2309
2310 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2311 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2312 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2313 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2314
2315 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2316 }
2317 }
2318
2319 static void
2320 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2321 {
2322 ixgbe_t *ixgbe = tx_ring->ixgbe;
2323 struct ixgbe_hw *hw = &ixgbe->hw;
2324 uint32_t size;
2325 uint32_t buf_low;
2326 uint32_t buf_high;
2327 uint32_t reg_val;
2328
2329 ASSERT(mutex_owned(&tx_ring->tx_lock));
2330 ASSERT(mutex_owned(&ixgbe->gen_lock));
2331
2332 /*
2333 * Initialize the length register
2334 */
2335 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2336 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2337
2338 /*
2339 * Initialize the base address registers
2340 */
2341 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2342 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2343 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2344 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2345
2346 /*
2347 * Setup head & tail pointers
2348 */
2349 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2350 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2351
2352 /*
2353 * Setup head write-back
2354 */
2355 if (ixgbe->tx_head_wb_enable) {
2356 /*
2357 * The memory of the head write-back is allocated using
2358 * the extra tbd beyond the tail of the tbd ring.
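 *
 * For example, with a 512-entry ring of 16-byte descriptors, size is
 * 8KB and the write-back word lives at tbd_area.address + size; the
 * same offset is applied to the DMA address programmed into
 * TDWBAL/TDWBAH below.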
2359 */ 2360 tx_ring->tbd_head_wb = (uint32_t *) 2361 ((uintptr_t)tx_ring->tbd_area.address + size); 2362 *tx_ring->tbd_head_wb = 0; 2363 2364 buf_low = (uint32_t) 2365 (tx_ring->tbd_area.dma_address + size); 2366 buf_high = (uint32_t) 2367 ((tx_ring->tbd_area.dma_address + size) >> 32); 2368 2369 /* Set the head write-back enable bit */ 2370 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2371 2372 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2373 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2374 2375 /* 2376 * Turn off relaxed ordering for head write back or it will 2377 * cause problems with the tx recycling 2378 */ 2379 2380 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ? 2381 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) : 2382 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index)); 2383 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 2384 if (hw->mac.type == ixgbe_mac_82598EB) { 2385 IXGBE_WRITE_REG(hw, 2386 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val); 2387 } else { 2388 IXGBE_WRITE_REG(hw, 2389 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val); 2390 } 2391 } else { 2392 tx_ring->tbd_head_wb = NULL; 2393 } 2394 2395 tx_ring->tbd_head = 0; 2396 tx_ring->tbd_tail = 0; 2397 tx_ring->tbd_free = tx_ring->ring_size; 2398 2399 if (ixgbe->tx_ring_init == B_TRUE) { 2400 tx_ring->tcb_head = 0; 2401 tx_ring->tcb_tail = 0; 2402 tx_ring->tcb_free = tx_ring->free_list_size; 2403 } 2404 2405 /* 2406 * Initialize the s/w context structure 2407 */ 2408 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t)); 2409 } 2410 2411 static void 2412 ixgbe_setup_tx(ixgbe_t *ixgbe) 2413 { 2414 struct ixgbe_hw *hw = &ixgbe->hw; 2415 ixgbe_tx_ring_t *tx_ring; 2416 uint32_t reg_val; 2417 uint32_t ring_mapping; 2418 int i; 2419 2420 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2421 tx_ring = &ixgbe->tx_rings[i]; 2422 ixgbe_setup_tx_ring(tx_ring); 2423 } 2424 2425 /* 2426 * Setup the per-ring statistics mapping. 
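 *
 * As on the rx side, each TQSMR/TQSM register holds four one-byte
 * queue mappings. The loop below accumulates four mappings in
 * ring_mapping and flushes the register whenever (i & 0x3) == 0x3;
 * the trailing write after the loop covers a ring count that is not
 * a multiple of four.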
2427 */
2428 ring_mapping = 0;
2429 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2430 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2431 if ((i & 0x3) == 0x3) {
2432 switch (hw->mac.type) {
2433 case ixgbe_mac_82598EB:
2434 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2435 ring_mapping);
2436 break;
2437
2438 case ixgbe_mac_82599EB:
2439 case ixgbe_mac_X540:
2440 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2441 ring_mapping);
2442 break;
2443
2444 default:
2445 break;
2446 }
2447
2448 ring_mapping = 0;
2449 }
2450 }
2451 if (i & 0x3) {
2452 switch (hw->mac.type) {
2453 case ixgbe_mac_82598EB:
2454 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2455 break;
2456
2457 case ixgbe_mac_82599EB:
2458 case ixgbe_mac_X540:
2459 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2460 break;
2461
2462 default:
2463 break;
2464 }
2465 }
2466
2467 /*
2468 * Enable CRC appending and TX padding (for short tx frames)
2469 */
2470 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2471 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2472 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2473
2474 /*
2475 * Enable DMA for 82599 and X540 parts
2476 */
2477 if (hw->mac.type == ixgbe_mac_82599EB ||
2478 hw->mac.type == ixgbe_mac_X540) {
2479 /* DMATXCTL.TE must be set after all Tx config is complete */
2480 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2481 reg_val |= IXGBE_DMATXCTL_TE;
2482 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2483
2484 /* Disable arbiter to set MTQC */
2485 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2486 reg_val |= IXGBE_RTTDCS_ARBDIS;
2487 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2488 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2489 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2490 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2491 }
2492
2493 /*
2494 * Enable the tx queues.
2495 * For the 82599, this must be done after DMATXCTL.TE is set.
2496 */
2497 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2498 tx_ring = &ixgbe->tx_rings[i];
2499 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2500 reg_val |= IXGBE_TXDCTL_ENABLE;
2501 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2502 }
2503 }
2504
2505 /*
2506 * ixgbe_setup_rss - Setup receive-side scaling feature.
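 *
 * The redirection table (RETA) holds 128 entries, packed four per
 * 32-bit register. Each entry is filled with (i % ring_per_group);
 * for example, with four rings per group the entries cycle
 * 0, 1, 2, 3, spreading the hash space evenly across the rings.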
2507 */ 2508 static void 2509 ixgbe_setup_rss(ixgbe_t *ixgbe) 2510 { 2511 struct ixgbe_hw *hw = &ixgbe->hw; 2512 uint32_t i, mrqc, rxcsum; 2513 uint32_t random; 2514 uint32_t reta; 2515 uint32_t ring_per_group; 2516 2517 /* 2518 * Fill out redirection table 2519 */ 2520 reta = 0; 2521 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2522 2523 for (i = 0; i < 128; i++) { 2524 reta = (reta << 8) | (i % ring_per_group) | 2525 ((i % ring_per_group) << 4); 2526 if ((i & 3) == 3) 2527 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 2528 } 2529 2530 /* 2531 * Fill out hash function seeds with a random constant 2532 */ 2533 for (i = 0; i < 10; i++) { 2534 (void) random_get_pseudo_bytes((uint8_t *)&random, 2535 sizeof (uint32_t)); 2536 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 2537 } 2538 2539 /* 2540 * Enable RSS & perform hash on these packet types 2541 */ 2542 mrqc = IXGBE_MRQC_RSSEN | 2543 IXGBE_MRQC_RSS_FIELD_IPV4 | 2544 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2545 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2546 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2547 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2548 IXGBE_MRQC_RSS_FIELD_IPV6 | 2549 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2550 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2551 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2552 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2553 2554 /* 2555 * Disable Packet Checksum to enable RSS for multiple receive queues. 2556 * It is an adapter hardware limitation that Packet Checksum is 2557 * mutually exclusive with RSS. 2558 */ 2559 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2560 rxcsum |= IXGBE_RXCSUM_PCSD; 2561 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 2562 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 2563 } 2564 2565 /* 2566 * ixgbe_setup_vmdq - Setup MAC classification feature 2567 */ 2568 static void 2569 ixgbe_setup_vmdq(ixgbe_t *ixgbe) 2570 { 2571 struct ixgbe_hw *hw = &ixgbe->hw; 2572 uint32_t vmdctl, i, vtctl; 2573 2574 /* 2575 * Setup the VMDq Control register, enable VMDq based on 2576 * packet destination MAC address: 2577 */ 2578 switch (hw->mac.type) { 2579 case ixgbe_mac_82598EB: 2580 /* 2581 * VMDq Enable = 1; 2582 * VMDq Filter = 0; MAC filtering 2583 * Default VMDq output index = 0; 2584 */ 2585 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2586 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2587 break; 2588 2589 case ixgbe_mac_82599EB: 2590 case ixgbe_mac_X540: 2591 /* 2592 * Enable VMDq-only. 2593 */ 2594 vmdctl = IXGBE_MRQC_VMDQEN; 2595 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl); 2596 2597 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2598 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2599 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2600 } 2601 2602 /* 2603 * Enable Virtualization and Replication. 2604 */ 2605 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2606 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2607 2608 /* 2609 * Enable receiving packets to all VFs 2610 */ 2611 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2612 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2613 break; 2614 2615 default: 2616 break; 2617 } 2618 } 2619 2620 /* 2621 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature. 
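 *
 * For example, an 82599 configured with up to 32 pools uses
 * VMDQRSS32EN, which leaves up to four RSS queues per pool; with
 * more than 32 pools the code must fall back to VMDQRSS64EN, which
 * allows at most two RSS queues per pool.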
2622 */ 2623 static void 2624 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe) 2625 { 2626 struct ixgbe_hw *hw = &ixgbe->hw; 2627 uint32_t i, mrqc, rxcsum; 2628 uint32_t random; 2629 uint32_t reta; 2630 uint32_t ring_per_group; 2631 uint32_t vmdctl, vtctl; 2632 2633 /* 2634 * Fill out redirection table 2635 */ 2636 reta = 0; 2637 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2638 for (i = 0; i < 128; i++) { 2639 reta = (reta << 8) | (i % ring_per_group) | 2640 ((i % ring_per_group) << 4); 2641 if ((i & 3) == 3) 2642 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 2643 } 2644 2645 /* 2646 * Fill out hash function seeds with a random constant 2647 */ 2648 for (i = 0; i < 10; i++) { 2649 (void) random_get_pseudo_bytes((uint8_t *)&random, 2650 sizeof (uint32_t)); 2651 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 2652 } 2653 2654 /* 2655 * Enable and setup RSS and VMDq 2656 */ 2657 switch (hw->mac.type) { 2658 case ixgbe_mac_82598EB: 2659 /* 2660 * Enable RSS & Setup RSS Hash functions 2661 */ 2662 mrqc = IXGBE_MRQC_RSSEN | 2663 IXGBE_MRQC_RSS_FIELD_IPV4 | 2664 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2665 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2666 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2667 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2668 IXGBE_MRQC_RSS_FIELD_IPV6 | 2669 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2670 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2671 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2672 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2673 2674 /* 2675 * Enable and Setup VMDq 2676 * VMDq Filter = 0; MAC filtering 2677 * Default VMDq output index = 0; 2678 */ 2679 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2680 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2681 break; 2682 2683 case ixgbe_mac_82599EB: 2684 case ixgbe_mac_X540: 2685 /* 2686 * Enable RSS & Setup RSS Hash functions 2687 */ 2688 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 | 2689 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2690 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2691 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2692 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2693 IXGBE_MRQC_RSS_FIELD_IPV6 | 2694 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2695 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2696 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2697 2698 /* 2699 * Enable VMDq+RSS. 2700 */ 2701 if (ixgbe->num_rx_groups > 32) { 2702 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN; 2703 } else { 2704 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN; 2705 } 2706 2707 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2708 2709 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2710 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2711 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2712 } 2713 break; 2714 2715 default: 2716 break; 2717 2718 } 2719 2720 /* 2721 * Disable Packet Checksum to enable RSS for multiple receive queues. 2722 * It is an adapter hardware limitation that Packet Checksum is 2723 * mutually exclusive with RSS. 2724 */ 2725 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2726 rxcsum |= IXGBE_RXCSUM_PCSD; 2727 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 2728 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 2729 2730 if (hw->mac.type == ixgbe_mac_82599EB || 2731 hw->mac.type == ixgbe_mac_X540) { 2732 /* 2733 * Enable Virtualization and Replication. 2734 */ 2735 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2736 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2737 2738 /* 2739 * Enable receiving packets to all VFs 2740 */ 2741 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2742 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2743 } 2744 } 2745 2746 /* 2747 * ixgbe_init_unicst - Initialize the unicast addresses. 
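 *
 * Each unicast address occupies one Receive Address Register (RAR)
 * slot; the slot count comes from hw->mac.num_rar_entries (128 on
 * the 82599, for example).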
2748 */
2749 static void
2750 ixgbe_init_unicst(ixgbe_t *ixgbe)
2751 {
2752 struct ixgbe_hw *hw = &ixgbe->hw;
2753 uint8_t *mac_addr;
2754 int slot;
2755 /*
2756 * Here we should consider two situations:
2757 *
2758 * 1. Chipset is initialized for the first time:
2759 * Clear all the multiple unicast addresses.
2760 *
2761 * 2. Chipset is reset:
2762 * Recover the multiple unicast addresses from the
2763 * software data structure to the RAR registers.
2764 */
2765 if (!ixgbe->unicst_init) {
2766 /*
2767 * Initialize the multiple unicast addresses
2768 */
2769 ixgbe->unicst_total = hw->mac.num_rar_entries;
2770 ixgbe->unicst_avail = ixgbe->unicst_total;
2771 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2772 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2773 bzero(mac_addr, ETHERADDRL);
2774 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2775 ixgbe->unicst_addr[slot].mac.set = 0;
2776 }
2777 ixgbe->unicst_init = B_TRUE;
2778 } else {
2779 /* Re-configure the RAR registers */
2780 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2781 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2782 if (ixgbe->unicst_addr[slot].mac.set == 1) {
2783 (void) ixgbe_set_rar(hw, slot, mac_addr,
2784 ixgbe->unicst_addr[slot].mac.group_index,
2785 IXGBE_RAH_AV);
2786 } else {
2787 bzero(mac_addr, ETHERADDRL);
2788 (void) ixgbe_set_rar(hw, slot, mac_addr,
2789 NULL, NULL);
2790 }
2791 }
2792 }
2793 }
2794
2795 /*
2796 * ixgbe_unicst_find - Find the slot for the specified unicast address
2797 */
2798 int
2799 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2800 {
2801 int slot;
2802
2803 ASSERT(mutex_owned(&ixgbe->gen_lock));
2804
2805 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2806 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2807 mac_addr, ETHERADDRL) == 0)
2808 return (slot);
2809 }
2810
2811 return (-1);
2812 }
2813
2814 /*
2815 * ixgbe_multicst_add - Add a multicast address.
2816 */
2817 int
2818 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2819 {
2820 ASSERT(mutex_owned(&ixgbe->gen_lock));
2821
2822 if ((multiaddr[0] & 01) == 0) {
2823 return (EINVAL);
2824 }
2825
2826 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2827 return (ENOENT);
2828 }
2829
2830 bcopy(multiaddr,
2831 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2832 ixgbe->mcast_count++;
2833
2834 /*
2835 * Update the multicast table in the hardware
2836 */
2837 ixgbe_setup_multicst(ixgbe);
2838
2839 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2840 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2841 return (EIO);
2842 }
2843
2844 return (0);
2845 }
2846
2847 /*
2848 * ixgbe_multicst_remove - Remove a multicast address.
2849 */
2850 int
2851 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2852 {
2853 int i;
2854
2855 ASSERT(mutex_owned(&ixgbe->gen_lock));
2856
2857 for (i = 0; i < ixgbe->mcast_count; i++) {
2858 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2859 ETHERADDRL) == 0) {
2860 for (i++; i < ixgbe->mcast_count; i++) {
2861 ixgbe->mcast_table[i - 1] =
2862 ixgbe->mcast_table[i];
2863 }
2864 ixgbe->mcast_count--;
2865 break;
2866 }
2867 }
2868
2869 /*
2870 * Update the multicast table in the hardware
2871 */
2872 ixgbe_setup_multicst(ixgbe);
2873
2874 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2875 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2876 return (EIO);
2877 }
2878
2879 return (0);
2880 }
2881
2882 /*
2883 * ixgbe_setup_multicst - Setup multicast data structures.
2884 *
2885 * This routine initializes all of the multicast related structures
2886 * and saves them in the hardware registers.
2887 */
2888 static void
2889 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2890 {
2891 uint8_t *mc_addr_list;
2892 uint32_t mc_addr_count;
2893 struct ixgbe_hw *hw = &ixgbe->hw;
2894
2895 ASSERT(mutex_owned(&ixgbe->gen_lock));
2896
2897 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2898
2899 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2900 mc_addr_count = ixgbe->mcast_count;
2901
2902 /*
2903 * Update the multicast addresses to the MTA registers
2904 */
2905 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2906 ixgbe_mc_table_itr, TRUE);
2907 }
2908
2909 /*
2910 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2911 *
2912 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2913 * Different chipsets may allow different combinations of vmdq and rss.
2914 */
2915 static void
2916 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2917 {
2918 struct ixgbe_hw *hw = &ixgbe->hw;
2919 uint32_t ring_per_group;
2920
2921 switch (hw->mac.type) {
2922 case ixgbe_mac_82598EB:
2923 /*
2924 * 82598 supports the following combinations:
2925 * vmdq no. x rss no.
2926 * [5..16] x 1
2927 * [1..4] x [1..16]
2928 * However, 8 rss queues per pool (vmdq) are sufficient for
2929 * most cases.
2930 */
2931 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2932 if (ixgbe->num_rx_groups > 4) {
2933 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2934 } else {
2935 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2936 min(8, ring_per_group);
2937 }
2938
2939 break;
2940
2941 case ixgbe_mac_82599EB:
2942 case ixgbe_mac_X540:
2943 /*
2944 * 82599 supports the following combinations:
2945 * vmdq no. x rss no.
2946 * [33..64] x [1..2]
2947 * [2..32] x [1..4]
2948 * 1 x [1..16]
2949 * However, 8 rss queues per pool (vmdq) are sufficient for
2950 * most cases.
2951 *
2952 * For now, treat X540 like the 82599.
2953 */
2954 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2955 if (ixgbe->num_rx_groups == 1) {
2956 ixgbe->num_rx_rings = min(8, ring_per_group);
2957 } else if (ixgbe->num_rx_groups <= 32) {
2958 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2959 min(4, ring_per_group);
2960 } else if (ixgbe->num_rx_groups <= 64) {
2961 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2962 min(2, ring_per_group);
2963 }
2964 break;
2965
2966 default:
2967 break;
2968 }
2969
2970 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2971
2972 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
2973 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
2974 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
2975 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
2976 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
2977 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
2978 } else {
2979 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
2980 }
2981
2982 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
2983 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
2984 }
2985
2986 /*
2987 * ixgbe_get_conf - Get driver configurations set in driver.conf.
2988 *
2989 * This routine gets user-configured values out of the configuration
2990 * file ixgbe.conf.
2991 *
2992 * For each configurable value, there is a minimum, a maximum, and a
2993 * default.
2994 * If the user does not configure a value, use the default.
2995 * If the user configures below the minimum, use the minimum.
2996 * If the user configures above the maximum, use the maximum.
2997 */
2998 static void
2999 ixgbe_get_conf(ixgbe_t *ixgbe)
3000 {
3001 struct ixgbe_hw *hw = &ixgbe->hw;
3002 uint32_t flow_control;
3003
3004 /*
3005 * ixgbe driver supports the following user configurations:
3006 *
3007 * Jumbo frame configuration:
3008 * default_mtu
3009 *
3010 * Ethernet flow control configuration:
3011 * flow_control
3012 *
3013 * Multiple rings configurations:
3014 * tx_queue_number
3015 * tx_ring_size
3016 * rx_queue_number
3017 * rx_ring_size
3018 *
3019 * Call ixgbe_get_prop() to get the value for a specific
3020 * configuration parameter.
3021 */
3022
3023 /*
3024 * Jumbo frame configuration - max_frame_size controls host buffer
3025 * allocation, so it includes MTU, ethernet header, vlan tag and
3026 * frame check sequence.
3027 */
3028 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3029 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3030
3031 ixgbe->max_frame_size = ixgbe->default_mtu +
3032 sizeof (struct ether_vlan_header) + ETHERFCSL;
3033
3034 /*
3035 * Ethernet flow control configuration
3036 */
3037 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3038 ixgbe_fc_none, 3, ixgbe_fc_none);
3039 if (flow_control == 3)
3040 flow_control = ixgbe_fc_default;
3041
3042 /*
3043 * fc.requested mode is what the user requests. After autoneg,
3044 * fc.current_mode will be the flow_control mode that was negotiated.
3045 */
3046 hw->fc.requested_mode = flow_control;
3047
3048 /*
3049 * Multiple rings configurations
3050 */
3051 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3052 ixgbe->capab->min_tx_que_num,
3053 ixgbe->capab->max_tx_que_num,
3054 ixgbe->capab->def_tx_que_num);
3055 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3056 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3057
3058 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3059 ixgbe->capab->min_rx_que_num,
3060 ixgbe->capab->max_rx_que_num,
3061 ixgbe->capab->def_rx_que_num);
3062 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3063 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3064
3065 /*
3066 * Multiple groups configuration
3067 */
3068 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3069 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3070 ixgbe->capab->def_rx_grp_num);
3071
3072 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3073 0, 1, DEFAULT_MR_ENABLE);
3074
3075 if (ixgbe->mr_enable == B_FALSE) {
3076 ixgbe->num_tx_rings = 1;
3077 ixgbe->num_rx_rings = 1;
3078 ixgbe->num_rx_groups = 1;
3079 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3080 } else {
3081 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3082 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3083 /*
3084 * The combination of num_rx_rings and num_rx_groups
3085 * may not be supported by the hardware. We need to
3086 * adjust them to appropriate values.
3087 */
3088 ixgbe_setup_vmdq_rss_conf(ixgbe);
3089 }
3090
3091 /*
3092 * Tunable used to force an interrupt type. The only use is
3093 * for testing of the lesser interrupt types.
3094 * 0 = don't force interrupt type
3095 * 1 = force interrupt type MSI-X
3096 * 2 = force interrupt type MSI
3097 * 3 = force interrupt type Legacy
3098 */
3099 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3100 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3101
3102 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3103 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3104 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3105 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3106 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3107 0, 1, DEFAULT_LSO_ENABLE);
3108 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3109 0, 1, DEFAULT_LRO_ENABLE);
3110 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3111 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3112 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3113 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3114
3115 /* Head Write Back not recommended for 82599 and X540 */
3116 if (hw->mac.type == ixgbe_mac_82599EB ||
3117 hw->mac.type == ixgbe_mac_X540) {
3118 ixgbe->tx_head_wb_enable = B_FALSE;
3119 }
3120
3121 /*
3122 * ixgbe LSO needs the tx h/w checksum support.
3123 * LSO will be disabled if tx h/w checksum is not
3124 * enabled.
3125 */
3126 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3127 ixgbe->lso_enable = B_FALSE;
3128 }
3129
3130 /*
3131 * ixgbe LRO needs the rx h/w checksum support.
3132 * LRO will be disabled if rx h/w checksum is not
3133 * enabled.
3134 */
3135 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3136 ixgbe->lro_enable = B_FALSE;
3137 }
3138
3139 /*
3140 * ixgbe LRO is currently supported only on the 82599 and X540.
3141 */
3142 if (hw->mac.type == ixgbe_mac_82598EB) {
3143 ixgbe->lro_enable = B_FALSE;
3144 }
3145 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3146 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3147 DEFAULT_TX_COPY_THRESHOLD);
3148 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3149 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3150 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3151 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3152 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3153 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3154 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3155 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3156 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3157
3158 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3159 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3160 DEFAULT_RX_COPY_THRESHOLD);
3161 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3162 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3163 DEFAULT_RX_LIMIT_PER_INTR);
3164
3165 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3166 ixgbe->capab->min_intr_throttle,
3167 ixgbe->capab->max_intr_throttle,
3168 ixgbe->capab->def_intr_throttle);
3169 /*
3170 * The 82599 and X540 require the interrupt throttling rate to be
3171 * a multiple of 8. This is enforced by the register
3172 * definition.
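 * For example, a configured throttling value of 205 is silently
 * rounded down to 205 & 0xFF8 == 200 by the statement below.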
3173 */
3174 if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540)
3175 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3176
3177 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3178 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3179 }
3180
3181 static void
3182 ixgbe_init_params(ixgbe_t *ixgbe)
3183 {
3184 ixgbe->param_en_10000fdx_cap = 1;
3185 ixgbe->param_en_1000fdx_cap = 1;
3186 ixgbe->param_en_100fdx_cap = 1;
3187 ixgbe->param_adv_10000fdx_cap = 1;
3188 ixgbe->param_adv_1000fdx_cap = 1;
3189 ixgbe->param_adv_100fdx_cap = 1;
3190
3191 ixgbe->param_pause_cap = 1;
3192 ixgbe->param_asym_pause_cap = 1;
3193 ixgbe->param_rem_fault = 0;
3194
3195 ixgbe->param_adv_autoneg_cap = 1;
3196 ixgbe->param_adv_pause_cap = 1;
3197 ixgbe->param_adv_asym_pause_cap = 1;
3198 ixgbe->param_adv_rem_fault = 0;
3199
3200 ixgbe->param_lp_10000fdx_cap = 0;
3201 ixgbe->param_lp_1000fdx_cap = 0;
3202 ixgbe->param_lp_100fdx_cap = 0;
3203 ixgbe->param_lp_autoneg_cap = 0;
3204 ixgbe->param_lp_pause_cap = 0;
3205 ixgbe->param_lp_asym_pause_cap = 0;
3206 ixgbe->param_lp_rem_fault = 0;
3207 }
3208
3209 /*
3210 * ixgbe_get_prop - Get a property value out of the configuration file
3211 * ixgbe.conf.
3212 *
3213 * Caller provides the name of the property, a default value, a minimum
3214 * value, and a maximum value.
3215 *
3216 * Return the configured value of the property, with the default, minimum
3217 * and maximum properly applied.
3218 */
3219 static int
3220 ixgbe_get_prop(ixgbe_t *ixgbe,
3221 char *propname, /* name of the property */
3222 int minval, /* minimum acceptable value */
3223 int maxval, /* maximum acceptable value */
3224 int defval) /* default value */
3225 {
3226 int value;
3227
3228 /*
3229 * Call ddi_prop_get_int() to read the conf settings
3230 */
3231 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3232 DDI_PROP_DONTPASS, propname, defval);
3233 if (value > maxval)
3234 value = maxval;
3235
3236 if (value < minval)
3237 value = minval;
3238
3239 return (value);
3240 }
3241
3242 /*
3243 * ixgbe_driver_setup_link - Use the link properties to set up the link.
3244 */
3245 int
3246 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3247 {
3248 u32 autoneg_advertised = 0;
3249
3250 /*
3251 * No half duplex support with 10Gb parts
3252 */
3253 if (ixgbe->param_adv_10000fdx_cap == 1)
3254 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3255
3256 if (ixgbe->param_adv_1000fdx_cap == 1)
3257 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3258
3259 if (ixgbe->param_adv_100fdx_cap == 1)
3260 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3261
3262 if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3263 ixgbe_notice(ixgbe, "Invalid link settings. Setting up link "
3264 "with autonegotiation and full link capabilities.");
3265
3266 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3267 IXGBE_LINK_SPEED_1GB_FULL |
3268 IXGBE_LINK_SPEED_100_FULL;
3269 }
3270
3271 if (setup_hw) {
3272 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3273 ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3274 ixgbe_notice(ixgbe, "Setup link failed on this "
3275 "device.");
3276 return (IXGBE_FAILURE);
3277 }
3278 }
3279
3280 return (IXGBE_SUCCESS);
3281 }
3282
3283 /*
3284 * ixgbe_driver_link_check - Link status processing.
3285 * 3286 * This function can be called in both kernel context and interrupt context 3287 */ 3288 static void 3289 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3290 { 3291 struct ixgbe_hw *hw = &ixgbe->hw; 3292 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3293 boolean_t link_up = B_FALSE; 3294 boolean_t link_changed = B_FALSE; 3295 3296 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3297 3298 (void) ixgbe_check_link(hw, &speed, &link_up, false); 3299 if (link_up) { 3300 ixgbe->link_check_complete = B_TRUE; 3301 3302 /* Link is up, enable flow control settings */ 3303 (void) ixgbe_fc_enable(hw); 3304 3305 /* 3306 * The Link is up, check whether it was marked as down earlier 3307 */ 3308 if (ixgbe->link_state != LINK_STATE_UP) { 3309 switch (speed) { 3310 case IXGBE_LINK_SPEED_10GB_FULL: 3311 ixgbe->link_speed = SPEED_10GB; 3312 break; 3313 case IXGBE_LINK_SPEED_1GB_FULL: 3314 ixgbe->link_speed = SPEED_1GB; 3315 break; 3316 case IXGBE_LINK_SPEED_100_FULL: 3317 ixgbe->link_speed = SPEED_100; 3318 } 3319 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3320 ixgbe->link_state = LINK_STATE_UP; 3321 link_changed = B_TRUE; 3322 } 3323 } else { 3324 if (ixgbe->link_check_complete == B_TRUE || 3325 (ixgbe->link_check_complete == B_FALSE && 3326 gethrtime() >= ixgbe->link_check_hrtime)) { 3327 /* 3328 * The link is really down 3329 */ 3330 ixgbe->link_check_complete = B_TRUE; 3331 3332 if (ixgbe->link_state != LINK_STATE_DOWN) { 3333 ixgbe->link_speed = 0; 3334 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3335 ixgbe->link_state = LINK_STATE_DOWN; 3336 link_changed = B_TRUE; 3337 } 3338 } 3339 } 3340 3341 /* 3342 * If we are in an interrupt context, need to re-enable the 3343 * interrupt, which was automasked 3344 */ 3345 if (servicing_interrupt() != 0) { 3346 ixgbe->eims |= IXGBE_EICR_LSC; 3347 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3348 } 3349 3350 if (link_changed) { 3351 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3352 } 3353 } 3354 3355 /* 3356 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599. 3357 */ 3358 static void 3359 ixgbe_sfp_check(void *arg) 3360 { 3361 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3362 uint32_t eicr = ixgbe->eicr; 3363 struct ixgbe_hw *hw = &ixgbe->hw; 3364 3365 mutex_enter(&ixgbe->gen_lock); 3366 if (eicr & IXGBE_EICR_GPI_SDP1) { 3367 /* clear the interrupt */ 3368 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 3369 3370 /* if link up, do multispeed fiber setup */ 3371 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3372 B_TRUE, B_TRUE); 3373 ixgbe_driver_link_check(ixgbe); 3374 ixgbe_get_hw_state(ixgbe); 3375 } else if (eicr & IXGBE_EICR_GPI_SDP2) { 3376 /* clear the interrupt */ 3377 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); 3378 3379 /* if link up, do sfp module setup */ 3380 (void) hw->mac.ops.setup_sfp(hw); 3381 3382 /* do multispeed fiber setup */ 3383 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3384 B_TRUE, B_TRUE); 3385 ixgbe_driver_link_check(ixgbe); 3386 ixgbe_get_hw_state(ixgbe); 3387 } 3388 mutex_exit(&ixgbe->gen_lock); 3389 3390 /* 3391 * We need to fully re-check the link later. 3392 */ 3393 ixgbe->link_check_complete = B_FALSE; 3394 ixgbe->link_check_hrtime = gethrtime() + 3395 (IXGBE_LINK_UP_TIME * 100000000ULL); 3396 } 3397 3398 /* 3399 * ixgbe_overtemp_check - overtemp module processing done in taskq 3400 * 3401 * This routine will only be called on adapters with temperature sensor. 3402 * The indication of over-temperature can be either SDP0 interrupt or the link 3403 * status change interrupt. 
3404 */
3405 static void
3406 ixgbe_overtemp_check(void *arg)
3407 {
3408 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3409 struct ixgbe_hw *hw = &ixgbe->hw;
3410 uint32_t eicr = ixgbe->eicr;
3411 ixgbe_link_speed speed;
3412 boolean_t link_up;
3413
3414 mutex_enter(&ixgbe->gen_lock);
3415
3416 /* make sure we know the current state of the link */
3417 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3418
3419 /* check the over-temp condition */
3420 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
3421 (eicr & IXGBE_EICR_LSC)) {
3422 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3423 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3424
3425 /*
3426 * Disable the adapter interrupts
3427 */
3428 ixgbe_disable_adapter_interrupts(ixgbe);
3429
3430 /*
3431 * Disable Rx/Tx units
3432 */
3433 (void) ixgbe_stop_adapter(hw);
3434
3435 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3436 ixgbe_error(ixgbe,
3437 "Problem: Network adapter has been stopped "
3438 "because it has overheated");
3439 ixgbe_error(ixgbe,
3440 "Action: Restart the computer. "
3441 "If the problem persists, power off the system "
3442 "and replace the adapter");
3443 }
3444 }
3445
3446 /* write to clear the interrupt */
3447 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3448
3449 mutex_exit(&ixgbe->gen_lock);
3450 }
3451
3452 /*
3453 * ixgbe_link_timer - timer for link status detection
3454 */
3455 static void
3456 ixgbe_link_timer(void *arg)
3457 {
3458 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3459
3460 mutex_enter(&ixgbe->gen_lock);
3461 ixgbe_driver_link_check(ixgbe);
3462 mutex_exit(&ixgbe->gen_lock);
3463 }
3464
3465 /*
3466 * ixgbe_local_timer - Driver watchdog function.
3467 *
3468 * This function handles the transmit stall check and other routines.
3469 */
3470 static void
3471 ixgbe_local_timer(void *arg)
3472 {
3473 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3474
3475 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3476 goto out;
3477
3478 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3479 ixgbe->reset_count++;
3480 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3481 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3482 goto out;
3483 }
3484
3485 if (ixgbe_stall_check(ixgbe)) {
3486 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3487 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3488
3489 ixgbe->reset_count++;
3490 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3491 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3492 }
3493
3494 out:
3495 ixgbe_restart_watchdog_timer(ixgbe);
3496 }
3497
3498 /*
3499 * ixgbe_stall_check - Check for transmit stall.
3500 *
3501 * This function checks if the adapter is stalled (in transmit).
3502 *
3503 * It is called each time the watchdog timeout is invoked.
3504 * If the transmit descriptor reclaim continuously fails,
3505 * the watchdog value will increment by 1. If the watchdog
3506 * value exceeds the threshold, the ixgbe is assumed to
3507 * have stalled and needs to be reset.
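 *
 * For example, with the watchdog firing roughly once per second, a
 * ring whose recycle_fail count stays nonzero for
 * STALL_WATCHDOG_TIMEOUT consecutive ticks is declared stalled and
 * the chip is reset.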
3508 */ 3509 static boolean_t 3510 ixgbe_stall_check(ixgbe_t *ixgbe) 3511 { 3512 ixgbe_tx_ring_t *tx_ring; 3513 boolean_t result; 3514 int i; 3515 3516 if (ixgbe->link_state != LINK_STATE_UP) 3517 return (B_FALSE); 3518 3519 /* 3520 * If any tx ring is stalled, we'll reset the chipset 3521 */ 3522 result = B_FALSE; 3523 for (i = 0; i < ixgbe->num_tx_rings; i++) { 3524 tx_ring = &ixgbe->tx_rings[i]; 3525 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) { 3526 tx_ring->tx_recycle(tx_ring); 3527 } 3528 3529 if (tx_ring->recycle_fail > 0) 3530 tx_ring->stall_watchdog++; 3531 else 3532 tx_ring->stall_watchdog = 0; 3533 3534 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 3535 result = B_TRUE; 3536 break; 3537 } 3538 } 3539 3540 if (result) { 3541 tx_ring->stall_watchdog = 0; 3542 tx_ring->recycle_fail = 0; 3543 } 3544 3545 return (result); 3546 } 3547 3548 3549 /* 3550 * is_valid_mac_addr - Check if the mac address is valid. 3551 */ 3552 static boolean_t 3553 is_valid_mac_addr(uint8_t *mac_addr) 3554 { 3555 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 3556 const uint8_t addr_test2[6] = 3557 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3558 3559 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 3560 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 3561 return (B_FALSE); 3562 3563 return (B_TRUE); 3564 } 3565 3566 static boolean_t 3567 ixgbe_find_mac_address(ixgbe_t *ixgbe) 3568 { 3569 #ifdef __sparc 3570 struct ixgbe_hw *hw = &ixgbe->hw; 3571 uchar_t *bytes; 3572 struct ether_addr sysaddr; 3573 uint_t nelts; 3574 int err; 3575 boolean_t found = B_FALSE; 3576 3577 /* 3578 * The "vendor's factory-set address" may already have 3579 * been extracted from the chip, but if the property 3580 * "local-mac-address" is set we use that instead. 3581 * 3582 * We check whether it looks like an array of 6 3583 * bytes (which it should, if OBP set it). If we can't 3584 * make sense of it this way, we'll ignore it. 3585 */ 3586 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 3587 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 3588 if (err == DDI_PROP_SUCCESS) { 3589 if (nelts == ETHERADDRL) { 3590 while (nelts--) 3591 hw->mac.addr[nelts] = bytes[nelts]; 3592 found = B_TRUE; 3593 } 3594 ddi_prop_free(bytes); 3595 } 3596 3597 /* 3598 * Look up the OBP property "local-mac-address?". If the user has set 3599 * 'local-mac-address? = false', use "the system address" instead. 3600 */ 3601 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 3602 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 3603 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 3604 if (localetheraddr(NULL, &sysaddr) != 0) { 3605 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 3606 found = B_TRUE; 3607 } 3608 } 3609 ddi_prop_free(bytes); 3610 } 3611 3612 /* 3613 * Finally(!), if there's a valid "mac-address" property (created 3614 * if we netbooted from this interface), we must use this instead 3615 * of any of the above to ensure that the NFS/install server doesn't 3616 * get confused by the address changing as Solaris takes over! 
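 *
 * The resulting precedence, highest first, is: "mac-address",
 * then the system address when "local-mac-address?" is "false",
 * then "local-mac-address", and finally whatever address the chip
 * already holds.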
3617 */ 3618 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 3619 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 3620 if (err == DDI_PROP_SUCCESS) { 3621 if (nelts == ETHERADDRL) { 3622 while (nelts--) 3623 hw->mac.addr[nelts] = bytes[nelts]; 3624 found = B_TRUE; 3625 } 3626 ddi_prop_free(bytes); 3627 } 3628 3629 if (found) { 3630 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 3631 return (B_TRUE); 3632 } 3633 #else 3634 _NOTE(ARGUNUSED(ixgbe)); 3635 #endif 3636 3637 return (B_TRUE); 3638 } 3639 3640 #pragma inline(ixgbe_arm_watchdog_timer) 3641 static void 3642 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 3643 { 3644 /* 3645 * Fire a watchdog timer 3646 */ 3647 ixgbe->watchdog_tid = 3648 timeout(ixgbe_local_timer, 3649 (void *)ixgbe, 1 * drv_usectohz(1000000)); 3650 3651 } 3652 3653 /* 3654 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 3655 */ 3656 void 3657 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 3658 { 3659 mutex_enter(&ixgbe->watchdog_lock); 3660 3661 if (!ixgbe->watchdog_enable) { 3662 ixgbe->watchdog_enable = B_TRUE; 3663 ixgbe->watchdog_start = B_TRUE; 3664 ixgbe_arm_watchdog_timer(ixgbe); 3665 } 3666 3667 mutex_exit(&ixgbe->watchdog_lock); 3668 } 3669 3670 /* 3671 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 3672 */ 3673 void 3674 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 3675 { 3676 timeout_id_t tid; 3677 3678 mutex_enter(&ixgbe->watchdog_lock); 3679 3680 ixgbe->watchdog_enable = B_FALSE; 3681 ixgbe->watchdog_start = B_FALSE; 3682 tid = ixgbe->watchdog_tid; 3683 ixgbe->watchdog_tid = 0; 3684 3685 mutex_exit(&ixgbe->watchdog_lock); 3686 3687 if (tid != 0) 3688 (void) untimeout(tid); 3689 } 3690 3691 /* 3692 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 3693 */ 3694 void 3695 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 3696 { 3697 mutex_enter(&ixgbe->watchdog_lock); 3698 3699 if (ixgbe->watchdog_enable) { 3700 if (!ixgbe->watchdog_start) { 3701 ixgbe->watchdog_start = B_TRUE; 3702 ixgbe_arm_watchdog_timer(ixgbe); 3703 } 3704 } 3705 3706 mutex_exit(&ixgbe->watchdog_lock); 3707 } 3708 3709 /* 3710 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 3711 */ 3712 static void 3713 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 3714 { 3715 mutex_enter(&ixgbe->watchdog_lock); 3716 3717 if (ixgbe->watchdog_start) 3718 ixgbe_arm_watchdog_timer(ixgbe); 3719 3720 mutex_exit(&ixgbe->watchdog_lock); 3721 } 3722 3723 /* 3724 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 3725 */ 3726 void 3727 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 3728 { 3729 timeout_id_t tid; 3730 3731 mutex_enter(&ixgbe->watchdog_lock); 3732 3733 ixgbe->watchdog_start = B_FALSE; 3734 tid = ixgbe->watchdog_tid; 3735 ixgbe->watchdog_tid = 0; 3736 3737 mutex_exit(&ixgbe->watchdog_lock); 3738 3739 if (tid != 0) 3740 (void) untimeout(tid); 3741 } 3742 3743 /* 3744 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 3745 */ 3746 static void 3747 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 3748 { 3749 struct ixgbe_hw *hw = &ixgbe->hw; 3750 3751 /* 3752 * mask all interrupts off 3753 */ 3754 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 3755 3756 /* 3757 * for MSI-X, also disable autoclear 3758 */ 3759 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 3760 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 3761 } 3762 3763 IXGBE_WRITE_FLUSH(hw); 3764 } 3765 3766 /* 3767 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 
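 *
 * In short: EIMS selects which causes may raise an interrupt, EIAC
 * (MSI-X mode only) selects causes that auto-clear when their vector
 * fires, and EIAM controls auto-masking. The "other" causes in bits
 * 29:20 are deliberately excluded from auto-clear so that link and
 * GPI events stay latched until software handles them.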
3768 */ 3769 static void 3770 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 3771 { 3772 struct ixgbe_hw *hw = &ixgbe->hw; 3773 uint32_t eiac, eiam; 3774 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3775 3776 /* interrupt types to enable */ 3777 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 3778 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 3779 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */ 3780 3781 /* enable automask on "other" causes that this adapter can generate */ 3782 eiam = ixgbe->capab->other_intr; 3783 3784 /* 3785 * msi-x mode 3786 */ 3787 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 3788 /* enable autoclear but not on bits 29:20 */ 3789 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR); 3790 3791 /* general purpose interrupt enable */ 3792 gpie |= (IXGBE_GPIE_MSIX_MODE 3793 | IXGBE_GPIE_PBA_SUPPORT 3794 | IXGBE_GPIE_OCD 3795 | IXGBE_GPIE_EIAME); 3796 /* 3797 * non-msi-x mode 3798 */ 3799 } else { 3800 3801 /* disable autoclear, leave gpie at default */ 3802 eiac = 0; 3803 3804 /* 3805 * General purpose interrupt enable. 3806 * For 82599 or X540, extended interrupt automask enable 3807 * only in MSI or MSI-X mode 3808 */ 3809 if ((hw->mac.type == ixgbe_mac_82598EB) || 3810 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) { 3811 gpie |= IXGBE_GPIE_EIAME; 3812 } 3813 } 3814 3815 /* Enable specific "other" interrupt types */ 3816 switch (hw->mac.type) { 3817 case ixgbe_mac_82598EB: 3818 gpie |= ixgbe->capab->other_gpie; 3819 break; 3820 3821 case ixgbe_mac_82599EB: 3822 case ixgbe_mac_X540: 3823 gpie |= ixgbe->capab->other_gpie; 3824 3825 /* Enable RSC Delay 8us when LRO enabled */ 3826 if (ixgbe->lro_enable) { 3827 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT); 3828 } 3829 break; 3830 3831 default: 3832 break; 3833 } 3834 3835 /* write to interrupt control registers */ 3836 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3837 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 3838 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam); 3839 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3840 IXGBE_WRITE_FLUSH(hw); 3841 } 3842 3843 /* 3844 * ixgbe_loopback_ioctl - Loopback support. 
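 *
 * The driver advertises three loopback properties (lb_normal,
 * lb_mac and lb_external), so LB_GET_INFO_SIZE reports the size of
 * three lb_property_t entries and LB_GET_INFO returns them in that
 * order.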
3845 */ 3846 enum ioc_reply 3847 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 3848 { 3849 lb_info_sz_t *lbsp; 3850 lb_property_t *lbpp; 3851 uint32_t *lbmp; 3852 uint32_t size; 3853 uint32_t value; 3854 3855 if (mp->b_cont == NULL) 3856 return (IOC_INVAL); 3857 3858 switch (iocp->ioc_cmd) { 3859 default: 3860 return (IOC_INVAL); 3861 3862 case LB_GET_INFO_SIZE: 3863 size = sizeof (lb_info_sz_t); 3864 if (iocp->ioc_count != size) 3865 return (IOC_INVAL); 3866 3867 value = sizeof (lb_normal); 3868 value += sizeof (lb_mac); 3869 value += sizeof (lb_external); 3870 3871 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 3872 *lbsp = value; 3873 break; 3874 3875 case LB_GET_INFO: 3876 value = sizeof (lb_normal); 3877 value += sizeof (lb_mac); 3878 value += sizeof (lb_external); 3879 3880 size = value; 3881 if (iocp->ioc_count != size) 3882 return (IOC_INVAL); 3883 3884 value = 0; 3885 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 3886 3887 lbpp[value++] = lb_normal; 3888 lbpp[value++] = lb_mac; 3889 lbpp[value++] = lb_external; 3890 break; 3891 3892 case LB_GET_MODE: 3893 size = sizeof (uint32_t); 3894 if (iocp->ioc_count != size) 3895 return (IOC_INVAL); 3896 3897 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 3898 *lbmp = ixgbe->loopback_mode; 3899 break; 3900 3901 case LB_SET_MODE: 3902 size = 0; 3903 if (iocp->ioc_count != sizeof (uint32_t)) 3904 return (IOC_INVAL); 3905 3906 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 3907 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 3908 return (IOC_INVAL); 3909 break; 3910 } 3911 3912 iocp->ioc_count = size; 3913 iocp->ioc_error = 0; 3914 3915 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3916 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3917 return (IOC_INVAL); 3918 } 3919 3920 return (IOC_REPLY); 3921 } 3922 3923 /* 3924 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 3925 */ 3926 static boolean_t 3927 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 3928 { 3929 if (mode == ixgbe->loopback_mode) 3930 return (B_TRUE); 3931 3932 ixgbe->loopback_mode = mode; 3933 3934 if (mode == IXGBE_LB_NONE) { 3935 /* 3936 * Reset the chip 3937 */ 3938 (void) ixgbe_reset(ixgbe); 3939 return (B_TRUE); 3940 } 3941 3942 mutex_enter(&ixgbe->gen_lock); 3943 3944 switch (mode) { 3945 default: 3946 mutex_exit(&ixgbe->gen_lock); 3947 return (B_FALSE); 3948 3949 case IXGBE_LB_EXTERNAL: 3950 break; 3951 3952 case IXGBE_LB_INTERNAL_MAC: 3953 ixgbe_set_internal_mac_loopback(ixgbe); 3954 break; 3955 } 3956 3957 mutex_exit(&ixgbe->gen_lock); 3958 3959 return (B_TRUE); 3960 } 3961 3962 /* 3963 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 
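 *
 * Note the chip-specific halves below: the 82598 powers down the
 * Atlas analog tx lanes so looped packets never reach the wire,
 * while the 82599/X540 instead force link-up (AUTOC.FLU) at 10G so
 * the MAC loopback path works without a link partner.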
3964 */ 3965 static void 3966 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 3967 { 3968 struct ixgbe_hw *hw; 3969 uint32_t reg; 3970 uint8_t atlas; 3971 3972 hw = &ixgbe->hw; 3973 3974 /* 3975 * Setup MAC loopback 3976 */ 3977 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 3978 reg |= IXGBE_HLREG0_LPBK; 3979 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 3980 3981 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 3982 reg &= ~IXGBE_AUTOC_LMS_MASK; 3983 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 3984 3985 /* 3986 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 3987 */ 3988 switch (hw->mac.type) { 3989 case ixgbe_mac_82598EB: 3990 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 3991 &atlas); 3992 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 3993 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 3994 atlas); 3995 3996 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 3997 &atlas); 3998 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 3999 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4000 atlas); 4001 4002 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4003 &atlas); 4004 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 4005 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4006 atlas); 4007 4008 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4009 &atlas); 4010 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 4011 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4012 atlas); 4013 break; 4014 4015 case ixgbe_mac_82599EB: 4016 case ixgbe_mac_X540: 4017 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4018 reg |= (IXGBE_AUTOC_FLU | 4019 IXGBE_AUTOC_10G_KX4); 4020 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4021 4022 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL, 4023 B_FALSE, B_TRUE); 4024 break; 4025 4026 default: 4027 break; 4028 } 4029 } 4030 4031 #pragma inline(ixgbe_intr_rx_work) 4032 /* 4033 * ixgbe_intr_rx_work - RX processing of ISR. 4034 */ 4035 static void 4036 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 4037 { 4038 mblk_t *mp; 4039 4040 mutex_enter(&rx_ring->rx_lock); 4041 4042 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4043 mutex_exit(&rx_ring->rx_lock); 4044 4045 if (mp != NULL) 4046 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4047 rx_ring->ring_gen_num); 4048 } 4049 4050 #pragma inline(ixgbe_intr_tx_work) 4051 /* 4052 * ixgbe_intr_tx_work - TX processing of ISR. 
4053 */ 4054 static void 4055 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 4056 { 4057 ixgbe_t *ixgbe = tx_ring->ixgbe; 4058 4059 /* 4060 * Recycle the tx descriptors 4061 */ 4062 tx_ring->tx_recycle(tx_ring); 4063 4064 /* 4065 * Schedule the re-transmit 4066 */ 4067 if (tx_ring->reschedule && 4068 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) { 4069 tx_ring->reschedule = B_FALSE; 4070 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl, 4071 tx_ring->ring_handle); 4072 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 4073 } 4074 } 4075 4076 #pragma inline(ixgbe_intr_other_work) 4077 /* 4078 * ixgbe_intr_other_work - Process interrupt types other than tx/rx 4079 */ 4080 static void 4081 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr) 4082 { 4083 ASSERT(mutex_owned(&ixgbe->gen_lock)); 4084 4085 /* 4086 * handle link status change 4087 */ 4088 if (eicr & IXGBE_EICR_LSC) { 4089 ixgbe_driver_link_check(ixgbe); 4090 ixgbe_get_hw_state(ixgbe); 4091 } 4092 4093 /* 4094 * check for fan failure on adapters with fans 4095 */ 4096 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 4097 (eicr & IXGBE_EICR_GPI_SDP1)) { 4098 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 4099 4100 /* 4101 * Disable the adapter interrupts 4102 */ 4103 ixgbe_disable_adapter_interrupts(ixgbe); 4104 4105 /* 4106 * Disable Rx/Tx units 4107 */ 4108 (void) ixgbe_stop_adapter(&ixgbe->hw); 4109 4110 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 4111 ixgbe_error(ixgbe, 4112 "Problem: Network adapter has been stopped " 4113 "because the fan has stopped.\n"); 4114 ixgbe_error(ixgbe, 4115 "Action: Replace the adapter.\n"); 4116 4117 /* re-enable the interrupt, which was automasked */ 4118 ixgbe->eims |= IXGBE_EICR_GPI_SDP1; 4119 } 4120 4121 /* 4122 * Do SFP check for adapters with hot-plug capability 4123 */ 4124 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) && 4125 ((eicr & IXGBE_EICR_GPI_SDP1) || (eicr & IXGBE_EICR_GPI_SDP2))) { 4126 ixgbe->eicr = eicr; 4127 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq, 4128 ixgbe_sfp_check, (void *)ixgbe, 4129 DDI_NOSLEEP)) != DDI_SUCCESS) { 4130 ixgbe_log(ixgbe, "No memory available to dispatch " 4131 "taskq for SFP check"); 4132 } 4133 } 4134 4135 /* 4136 * Do over-temperature check for adapters with temp sensor 4137 */ 4138 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) && 4139 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { 4140 ixgbe->eicr = eicr; 4141 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq, 4142 ixgbe_overtemp_check, (void *)ixgbe, 4143 DDI_NOSLEEP)) != DDI_SUCCESS) { 4144 ixgbe_log(ixgbe, "No memory available to dispatch " 4145 "taskq for overtemp check"); 4146 } 4147 } 4148 } 4149 4150 /* 4151 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 
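 *
 * With a single legacy vector, the IVAR setup routes rx ring 0 to
 * EICR bit 0 and tx ring 0 to EICR bit 1, which is why the handler
 * below tests (eicr & 0x1) for receive work and (eicr & 0x2) for
 * transmit work.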
/*
 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
 */
static uint_t
ixgbe_intr_legacy(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	ixgbe_rx_ring_t *rx_ring;
	uint32_t eicr;
	mblk_t *mp;
	boolean_t tx_reschedule;
	uint_t result;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	mp = NULL;
	tx_reschedule = B_FALSE;

	/*
	 * Any bit set in eicr: claim this interrupt
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		mutex_exit(&ixgbe->gen_lock);
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	if (eicr) {
		/*
		 * For legacy interrupt, we have only one interrupt,
		 * so we have only one rx ring and one tx ring enabled.
		 */
		ASSERT(ixgbe->num_rx_rings == 1);
		ASSERT(ixgbe->num_tx_rings == 1);

		/*
		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
		 */
		if (eicr & 0x1) {
			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
			/*
			 * Clean the rx descriptors
			 */
			rx_ring = &ixgbe->rx_rings[0];
			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
		}

		/*
		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
		 */
		if (eicr & 0x2) {
			/*
			 * Recycle the tx descriptors
			 */
			tx_ring = &ixgbe->tx_rings[0];
			tx_ring->tx_recycle(tx_ring);

			/*
			 * Schedule the re-transmit
			 */
			tx_reschedule = (tx_ring->reschedule &&
			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
		}

		/* any interrupt type other than tx/rx */
		if (eicr & ixgbe->capab->other_intr) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
				break;

			default:
				break;
			}
			ixgbe_intr_other_work(ixgbe, eicr);
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}

		mutex_exit(&ixgbe->gen_lock);

		result = DDI_INTR_CLAIMED;
	} else {
		mutex_exit(&ixgbe->gen_lock);

		/*
		 * No interrupt cause bits set: don't claim this interrupt.
		 */
		result = DDI_INTR_UNCLAIMED;
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	/*
	 * Do the following work outside of the gen_lock
	 */
	if (mp != NULL) {
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
	}

	if (tx_reschedule) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
	}

	return (result);
}
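/*
 * The mac_rx_ring() and mac_tx_ring_update() upcalls above are
 * deliberately made after gen_lock is dropped: calling back into the
 * MAC layer while holding a driver lock could deadlock against MAC
 * entry points that re-enter the driver and take the same lock.
 */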
/*
 * ixgbe_intr_msi - Interrupt handler for MSI.
 */
static uint_t
ixgbe_intr_msi(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;

	_NOTE(ARGUNUSED(arg2));

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * For MSI interrupt, we have only one vector,
	 * so we have only one rx ring and one tx ring enabled.
	 */
	ASSERT(ixgbe->num_rx_rings == 1);
	ASSERT(ixgbe->num_tx_rings == 1);

	/*
	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
	 */
	if (eicr & 0x1) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
	}

	/*
	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
	 */
	if (eicr & 0x2) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
	}

	/* any interrupt type other than tx/rx */
	if (eicr & ixgbe->capab->other_intr) {
		mutex_enter(&ixgbe->gen_lock);
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			break;

		default:
			break;
		}
		ixgbe_intr_other_work(ixgbe, eicr);
		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		mutex_exit(&ixgbe->gen_lock);
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	return (DDI_INTR_CLAIMED);
}
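/*
 * A recap of the mask-register dance in these handlers: reading EICR
 * returns the pending cause bits and, with the auto-mask behavior the
 * driver configures, also masks those causes; 1-bits written to EIMC
 * mask (disable) causes and 1-bits written to EIMS unmask them. The
 * software copies ixgbe->eims/eimc track which causes the driver
 * currently wants enabled, which is why each handler finishes by
 * writing ixgbe->eims back to EIMS.
 */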
/*
 * ixgbe_intr_msix - Interrupt handler for MSI-X.
 */
static uint_t
ixgbe_intr_msix(void *arg1, void *arg2)
{
	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
	ixgbe_t *ixgbe = vect->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;
	int r_idx = 0;

	_NOTE(ARGUNUSED(arg2));

	/*
	 * Clean each rx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
		    (ixgbe->num_rx_rings - 1));
	}

	/*
	 * Clean each tx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
		    (ixgbe->num_tx_rings - 1));
	}

	/*
	 * Clean other interrupt (link change) that has its bit set in the map
	 */
	if (BT_TEST(vect->other_map, 0) == 1) {
		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip,
			    DDI_SERVICE_DEGRADED);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Check "other" cause bits: any interrupt type other
		 * than tx/rx
		 */
		if (eicr & ixgbe->capab->other_intr) {
			mutex_enter(&ixgbe->gen_lock);
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			default:
				break;
			}
			mutex_exit(&ixgbe->gen_lock);
		}

		/* re-enable the interrupts which were automasked */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	return (DDI_INTR_CLAIMED);
}
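/*
 * Example of the bitmap walk in ixgbe_intr_msix(): if vect->rx_map
 * has bits 0 and 2 set, bt_getlowbit(rx_map, 0, num_rx_rings - 1)
 * returns 0, the next call starting from bit 1 returns 2, and the
 * call after that returns -1, so exactly rx rings 0 and 2 are
 * serviced by this vector.
 */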
/*
 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
 *
 * Normal sequence is to try MSI-X; if not successful, try MSI;
 * if not successful, try Legacy.
 * ixgbe->intr_force can be used to force the sequence to start with
 * any of the 3 types.
 * If MSI-X is not used, the number of tx/rx rings is forced to 1.
 */
static int
ixgbe_alloc_intrs(ixgbe_t *ixgbe)
{
	dev_info_t *devinfo;
	int intr_types;
	int rc;

	devinfo = ixgbe->dip;

	/*
	 * Get supported interrupt types
	 */
	rc = ddi_intr_get_supported_types(devinfo, &intr_types);

	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get supported interrupt types failed: %d", rc);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);

	ixgbe->intr_type = 0;

	/*
	 * Install MSI-X interrupts
	 */
	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
		if (rc == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate MSI-X failed, trying MSI interrupts...");
	}

	/*
	 * MSI-X not used, force rings and groups to 1
	 */
	ixgbe->num_rx_rings = 1;
	ixgbe->num_rx_groups = 1;
	ixgbe->num_tx_rings = 1;
	ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
	ixgbe_log(ixgbe,
	    "MSI-X not used, force rings and groups number to 1");

	/*
	 * Install MSI interrupts
	 */
	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
		if (rc == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate MSI failed, trying Legacy interrupts...");
	}

	/*
	 * Install legacy interrupts
	 */
	if (intr_types & DDI_INTR_TYPE_FIXED) {
		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
		if (rc == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate Legacy interrupts failed");
	}

	/*
	 * If none of the 3 types succeeded, return failure
	 */
	return (IXGBE_FAILURE);
}
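/*
 * Example of the fallback ladder above (assuming the IXGBE_INTR_*
 * force values are ordered MSIX < MSI < LEGACY, as the <= comparisons
 * imply): with intr_force set to IXGBE_INTR_MSI the MSI-X branch is
 * skipped, MSI is tried first, and fixed (legacy) interrupts remain
 * the last resort.
 */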
/*
 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
 *
 * For legacy and MSI, only 1 handle is needed. For MSI-X,
 * if fewer than 2 handles are available, return failure.
 * Upon success, this maps the vectors to rx and tx rings for
 * interrupts.
 */
static int
ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
{
	dev_info_t *devinfo;
	int request, count, actual;
	int minimum;
	int rc;
	uint32_t ring_per_group;

	devinfo = ixgbe->dip;

	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
		request = 1;	/* Request 1 legacy interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
		break;

	case DDI_INTR_TYPE_MSI:
		request = 1;	/* Request 1 MSI interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
		break;

	case DDI_INTR_TYPE_MSIX:
		/*
		 * Best number of vectors for the adapter is
		 * (# rx rings + # tx rings), however we will
		 * limit the request number.
		 */
		request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
		if (request > ixgbe->capab->max_ring_vect)
			request = ixgbe->capab->max_ring_vect;
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
		break;

	default:
		ixgbe_log(ixgbe,
		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
		    intr_type);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
	    request, minimum);

	/*
	 * Get number of supported interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt number failed. Return: %d, count: %d",
		    rc, count);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);

	actual = 0;
	ixgbe->intr_cnt = 0;
	ixgbe->intr_cnt_max = 0;
	ixgbe->intr_cnt_min = 0;

	/*
	 * Allocate an array of interrupt handles
	 */
	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
	    request, &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Allocate interrupts failed. "
		    "return: %d, request: %d, actual: %d",
		    rc, request, actual);
		goto alloc_handle_fail;
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);

	/*
	 * upper/lower limit of interrupts
	 */
	ixgbe->intr_cnt = actual;
	ixgbe->intr_cnt_max = request;
	ixgbe->intr_cnt_min = minimum;

	/*
	 * The number of rss rings per group should not exceed the number of
	 * rx interrupt vectors; otherwise the rx ring number must be
	 * adjusted.
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
	if (actual < ring_per_group) {
		ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
		ixgbe_setup_vmdq_rss_conf(ixgbe);
	}

	/*
	 * Now we know the actual number of vectors. Map the vectors to the
	 * "other" interrupt and to the rx and tx rings.
	 */
	if (actual < minimum) {
		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
		    actual);
		goto alloc_handle_fail;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt cap failed: %d", rc);
		goto alloc_handle_fail;
	}

	ixgbe->intr_type = intr_type;

	return (IXGBE_SUCCESS);

alloc_handle_fail:
	ixgbe_rem_intrs(ixgbe);

	return (IXGBE_FAILURE);
}
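/*
 * Worked example of the MSI-X sizing above: with 8 rx rings and
 * 8 tx rings, request = min(16, 8 + 8) = 16; an adapter whose
 * max_ring_vect is, say, 8 would trim the request to 8. Note that
 * ddi_intr_alloc() may still return fewer vectors than requested,
 * which is what the "actual" handling above deals with.
 */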
/*
 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
 *
 * Before adding the interrupt handlers, the interrupt vectors have
 * been allocated, and the rx/tx rings have also been allocated.
 */
static int
ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
{
	int vector = 0;
	int rc;

	switch (ixgbe->intr_type) {
	case DDI_INTR_TYPE_MSIX:
		/*
		 * Add interrupt handler for all vectors
		 */
		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
			/*
			 * install pointer to vect_map[vector]
			 */
			rc = ddi_intr_add_handler(ixgbe->htable[vector],
			    (ddi_intr_handler_t *)ixgbe_intr_msix,
			    (void *)&ixgbe->vect_map[vector], NULL);

			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Add interrupt handler failed. "
				    "return: %d, vector: %d", rc, vector);
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    ixgbe->htable[vector]);
				}
				return (IXGBE_FAILURE);
			}
		}

		break;

	case DDI_INTR_TYPE_MSI:
		/*
		 * Add an interrupt handler for the only vector
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_msi,
		    (void *)ixgbe, NULL);

		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add MSI interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}

		break;

	case DDI_INTR_TYPE_FIXED:
		/*
		 * Add an interrupt handler for the only vector
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
		    (void *)ixgbe, NULL);

		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add legacy interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}

		break;

	default:
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

#pragma inline(ixgbe_map_rxring_to_vector)
/*
 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
 */
static void
ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
{
	/*
	 * Set bit in map
	 */
	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);

	/*
	 * Count bits set
	 */
	ixgbe->vect_map[v_idx].rxr_cnt++;

	/*
	 * Remember bit position
	 */
	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
}

#pragma inline(ixgbe_map_txring_to_vector)
/*
 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
 */
static void
ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
{
	/*
	 * Set bit in map
	 */
	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);

	/*
	 * Count bits set
	 */
	ixgbe->vect_map[v_idx].txr_cnt++;

	/*
	 * Remember bit position
	 */
	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
}
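/*
 * Example of the mapping state built above: mapping rx ring 2 to
 * vector 1 sets bit 2 in vect_map[1].rx_map (the per-vector bitmap
 * walked by ixgbe_intr_msix()) and records intr_vector = 1 and
 * vect_bit = 0x2 on the ring itself, so that per-ring code can
 * address the vector's bit in the EICS/EIMS-style registers (the
 * EICS trigger in ixgbe_rx_ring_intr_enable() below, for instance,
 * writes 1 << intr_vector).
 */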
/*
 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
 * allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
    int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (cause == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}

/*
 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
 * given interrupt vector allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (cause == -1) {
			/* other causes */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}
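/*
 * Worked example of the 82599/X540 IVAR layout handled above: each
 * 32-bit IVAR register holds four 8-bit entries covering two queues,
 * rx in the low byte and tx in the high byte of each 16-bit half.
 * For rx queue 5 (intr_alloc_entry = 5, cause = 0):
 *	register = IVAR(5 >> 1) = IVAR(2)
 *	index    = 16 * (5 & 1) + 8 * 0 = 16
 * so the vector number (with IXGBE_IVAR_ALLOC_VAL set) lands in bits
 * 23:16 of IVAR(2).
 */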
/*
 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
 * given interrupt vector allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (cause == -1) {
			/* other causes */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}

/*
 * Convert the rx ring index maintained by the driver to the rx ring
 * index in hardware.
 */
static uint32_t
ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t rx_ring_per_group, hw_rx_index;

	if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
	    ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
		return (sw_rx_index);
	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			return (sw_rx_index);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			return (sw_rx_index * 2);

		default:
			break;
		}
	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
		rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			hw_rx_index = (sw_rx_index / rx_ring_per_group) *
			    16 + (sw_rx_index % rx_ring_per_group);
			return (hw_rx_index);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			if (ixgbe->num_rx_groups > 32) {
				hw_rx_index = (sw_rx_index /
				    rx_ring_per_group) * 2 +
				    (sw_rx_index % rx_ring_per_group);
			} else {
				hw_rx_index = (sw_rx_index /
				    rx_ring_per_group) * 4 +
				    (sw_rx_index % rx_ring_per_group);
			}
			return (hw_rx_index);

		default:
			break;
		}
	}

	/*
	 * Should never reach here. Just to make the compiler happy.
	 */
	return (sw_rx_index);
}
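/*
 * Worked example of the VMDq+RSS conversion above: on an 82599 with
 * 16 groups of 2 rings each (num_rx_groups <= 32, so each group owns
 * a block of 4 hardware queues), software ring 3 is group 1, offset 1,
 * and therefore maps to hardware queue (3 / 2) * 4 + (3 % 2) = 5.
 */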
/*
 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
 *
 * For MSI-X, the rx, tx, and other interrupts are mapped to
 * vector[0 .. (intr_cnt - 1)].
 */
static int
ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
{
	int i, vector = 0;

	/* initialize vector map */
	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		ixgbe->vect_map[i].ixgbe = ixgbe;
	}

	/*
	 * The non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
	 * tx rings[0] on RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
		return (IXGBE_SUCCESS);
	}

	/*
	 * Interrupts/vectors mapping for MSI-X
	 */

	/*
	 * Map other interrupt to vector 0,
	 * Set bit in map and count the bits set.
	 */
	BT_SET(ixgbe->vect_map[vector].other_map, 0);
	ixgbe->vect_map[vector].other_cnt++;

	/*
	 * Map rx ring interrupts to vectors
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
		vector = (vector + 1) % ixgbe->intr_cnt;
	}

	/*
	 * Map tx ring interrupts to vectors
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ixgbe_map_txring_to_vector(ixgbe, i, vector);
		vector = (vector + 1) % ixgbe->intr_cnt;
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
 *
 * This relies on the ring/vector mapping already set up in the
 * vect_map[] structures.
 */
static void
ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_intr_vector_t *vect;	/* vector bitmap */
	int r_idx;	/* ring index */
	int v_idx;	/* vector index */
	uint32_t hw_index;

	/*
	 * Clear any previous entries
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		for (v_idx = 0; v_idx < 25; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		for (v_idx = 0; v_idx < 64; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
		break;

	default:
		break;
	}

	/*
	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
	 * tx rings[0] will use RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
		return;
	}

	/*
	 * For MSI-X interrupt, "Other" is always on vector[0].
	 */
	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);

	/*
	 * For each interrupt vector, populate the IVAR table
	 */
	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
		vect = &ixgbe->vect_map[v_idx];

		/*
		 * For each rx ring bit set
		 */
		r_idx = bt_getlowbit(vect->rx_map, 0,
		    (ixgbe->num_rx_rings - 1));

		while (r_idx >= 0) {
			hw_index = ixgbe->rx_rings[r_idx].hw_index;
			ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
			    (ixgbe->num_rx_rings - 1));
		}

		/*
		 * For each tx ring bit set
		 */
		r_idx = bt_getlowbit(vect->tx_map, 0,
		    (ixgbe->num_tx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
			    (ixgbe->num_tx_rings - 1));
		}
	}
}
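/*
 * Example of the round-robin mapping above with intr_cnt = 4, four rx
 * rings and four tx rings: "other" lands on vector 0, rx rings 0-3 go
 * to vectors 0-3, and tx rings 0-3 then wrap around onto vectors 0-3
 * again, so every vector ends up servicing one rx ring and one tx
 * ring.
 */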
/*
 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
 */
static void
ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	for (i = 0; i < ixgbe->intr_cnt; i++) {
		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
		if (rc != DDI_SUCCESS) {
			IXGBE_DEBUGLOG_1(ixgbe,
			    "Remove intr handler failed: %d", rc);
		}
	}
}

/*
 * ixgbe_rem_intrs - Remove the allocated interrupts.
 */
static void
ixgbe_rem_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	for (i = 0; i < ixgbe->intr_cnt; i++) {
		rc = ddi_intr_free(ixgbe->htable[i]);
		if (rc != DDI_SUCCESS) {
			IXGBE_DEBUGLOG_1(ixgbe,
			    "Free intr failed: %d", rc);
		}
	}

	kmem_free(ixgbe->htable, ixgbe->intr_size);
	ixgbe->htable = NULL;
}

/*
 * ixgbe_enable_intrs - Enable all the ddi interrupts.
 */
static int
ixgbe_enable_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	/*
	 * Enable interrupts
	 */
	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/*
		 * Call ddi_intr_block_enable() for MSI
		 */
		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Enable block intr failed: %d", rc);
			return (IXGBE_FAILURE);
		}
	} else {
		/*
		 * Call ddi_intr_enable() for Legacy/MSI non block enable
		 */
		for (i = 0; i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_enable(ixgbe->htable[i]);
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Enable intr failed: %d", rc);
				return (IXGBE_FAILURE);
			}
		}
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_disable_intrs - Disable all the interrupts.
 */
static int
ixgbe_disable_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	/*
	 * Disable all interrupts
	 */
	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Disable block intr failed: %d", rc);
			return (IXGBE_FAILURE);
		}
	} else {
		for (i = 0; i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_disable(ixgbe->htable[i]);
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Disable intr failed: %d", rc);
				return (IXGBE_FAILURE);
			}
		}
	}

	return (IXGBE_SUCCESS);
}
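/*
 * DDI_INTR_FLAG_BLOCK, retrieved via ddi_intr_get_cap() during
 * allocation, indicates that the vectors can only be enabled and
 * disabled as a group, hence the ddi_intr_block_enable()/
 * ddi_intr_block_disable() path above; otherwise each handle is
 * toggled individually with ddi_intr_enable()/ddi_intr_disable().
 */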
/*
 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
 */
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	uint32_t pcs1g_anlp = 0;
	uint32_t pcs1g_ana = 0;
	boolean_t autoneg = B_FALSE;

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;

	/* check for link, don't wait */
	(void) ixgbe_check_link(hw, &speed, &link_up, false);
	pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

	if (link_up) {
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	(void) ixgbe_get_link_capabilities(hw, &speed, &autoneg);

	ixgbe->param_adv_1000fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
	    (speed & IXGBE_LINK_SPEED_1GB_FULL)) ? 1 : 0;
	ixgbe->param_adv_100fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
	    (speed & IXGBE_LINK_SPEED_100_FULL)) ? 1 : 0;
}

/*
 * ixgbe_get_driver_control - Notify that driver is in control of device.
 */
static void
ixgbe_get_driver_control(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;

	/*
	 * Notify firmware that driver is in control of device
	 */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}

/*
 * ixgbe_release_driver_control - Notify that driver is no longer in control
 * of device.
 */
static void
ixgbe_release_driver_control(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;

	/*
	 * Notify firmware that driver is no longer in control of device
	 */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}

/*
 * ixgbe_atomic_reserve - Atomic decrease operation.
 */
int
ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
{
	uint32_t oldval;
	uint32_t newval;

	/*
	 * ATOMICALLY
	 */
	do {
		oldval = *count_p;
		if (oldval < n)
			return (-1);
		newval = oldval - n;
	} while (atomic_cas_32(count_p, oldval, newval) != oldval);

	return (newval);
}
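/*
 * A minimal usage sketch for ixgbe_atomic_reserve() (illustrative
 * only, not compiled; desc_needed and the surrounding tx-path context
 * are hypothetical): reserve descriptors before building a packet and
 * roll the reservation back with atomic_add_32() if the send cannot
 * proceed.
 */
#if 0
	if (ixgbe_atomic_reserve(&tx_ring->tbd_free, desc_needed) < 0) {
		/* not enough free tx descriptors; ask MAC to retry later */
		tx_ring->reschedule = B_TRUE;
		return (mp);
	}
	/* ... and on a later failure path, undo the reservation: */
	atomic_add_32(&tx_ring->tbd_free, desc_needed);
#endif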
/*
 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
 */
static uint8_t *
ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
{
	uint8_t *addr = *upd_ptr;
	uint8_t *new_ptr;

	_NOTE(ARGUNUSED(hw));
	_NOTE(ARGUNUSED(vmdq));

	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*upd_ptr = new_ptr;
	return (addr);
}

/*
 * FMA support
 */
int
ixgbe_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

int
ixgbe_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

/*
 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
 */
static int
ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	_NOTE(ARGUNUSED(impl_data));
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static void
ixgbe_fm_init(ixgbe_t *ixgbe)
{
	ddi_iblock_cookie_t iblk;
	int fma_dma_flag;

	/*
	 * Only register with IO Fault Services if we have some capability
	 */
	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	ixgbe_set_fma_flags(fma_dma_flag);

	if (ixgbe->fm_capabilities) {

		/*
		 * Register capabilities with IO Fault Services
		 */
		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_setup(ixgbe->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_register(ixgbe->dip,
			    ixgbe_fm_error_cb, (void *)ixgbe);
	}
}

static void
ixgbe_fm_fini(ixgbe_t *ixgbe)
{
	/*
	 * Only unregister FMA capabilities if they are registered
	 */
	if (ixgbe->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_teardown(ixgbe->dip);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_unregister(ixgbe->dip);

		/*
		 * Unregister from IO Fault Service
		 */
		ddi_fm_fini(ixgbe->dip);
	}
}

void
ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
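/*
 * A plausible caller sketch for the FMA helpers above (illustrative
 * only, not compiled, and not a verbatim excerpt): on detecting a bad
 * register access, post an ereport, report the service impact, and
 * mark the driver state accordingly.
 */
#if 0
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
	}
#endif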
static int
ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(&rx_ring->rx_lock);
	return (0);
}

/*
 * Get the global ring index by a ring index within a group.
 */
static int
ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
{
	ixgbe_rx_ring_t *rx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		if (rx_ring->group_index == gindex)
			rindex--;
		if (rindex < 0)
			return (i);
	}

	return (-1);
}

/*
 * Callback function for the MAC layer to register all rings.
 */
/* ARGSUSED */
void
ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		/*
		 * 'ring_index' is the ring index within the group.
		 * Need to get the global ring index by searching in groups.
		 */
		int global_ring_index = ixgbe_get_rx_ring_index(
		    ixgbe, group_index, ring_index);

		ASSERT(global_ring_index >= 0);

		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = ixgbe_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = ixgbe_ring_rx_poll;
		infop->mri_stat = ixgbe_rx_ring_stat;

		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    ixgbe->htable[rx_ring->intr_vector];
		}

		break;
	}
	case MAC_RING_TYPE_TX: {
		ASSERT(group_index == -1);
		ASSERT(ring_index < ixgbe->num_tx_rings);

		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = ixgbe_ring_tx;
		infop->mri_stat = ixgbe_tx_ring_stat;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    ixgbe->htable[tx_ring->intr_vector];
		}
		break;
	}
	default:
		break;
	}
}

/*
 * Callback function for the MAC layer to register all groups.
 */
void
ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		ixgbe_rx_group_t *rx_group;

		rx_group = &ixgbe->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = ixgbe_addmac;
		infop->mgi_remmac = ixgbe_remmac;
		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		break;
	default:
		break;
	}
}
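/*
 * Worked example of ixgbe_get_rx_ring_index() above: with four global
 * rx rings whose group_index values are {0, 0, 1, 1}, a lookup of
 * (gindex = 1, rindex = 0) first decrements rindex at global ring 2,
 * driving it negative, and so returns 2: the first ring belonging to
 * group 1.
 */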
/*
 * Enable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * Interrupts are being adjusted. ixgbe_intr_adjust()
		 * will eventually re-enable the interrupt when it's
		 * done with the adjustment.
		 */
		return (0);
	}

	/*
	 * Enable the interrupt by setting the VAL bit of the given
	 * interrupt vector allocation register (IVAR).
	 */
	ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);

	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);

	/*
	 * Trigger a Rx interrupt on this ring
	 */
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
	IXGBE_WRITE_FLUSH(&ixgbe->hw);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}

/*
 * Disable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * In the rare case where an interrupt is being
		 * disabled while interrupts are being adjusted,
		 * we don't fail the operation. No interrupts will
		 * be generated while they are adjusted, and
		 * ixgbe_intr_adjust() will cause the interrupts
		 * to be re-enabled once it completes. Note that
		 * in this case, packets may be delivered to the
		 * stack via interrupts before ixgbe_rx_ring_intr_enable()
		 * is called again. This is acceptable since interrupt
		 * adjustment is infrequent, and the stack will be
		 * able to handle these packets.
		 */
		return (0);
	}

	/*
	 * Disable the interrupt by clearing the VAL bit of the given
	 * interrupt vector allocation register (IVAR).
	 */
	ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);

	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}

/*
 * Add a mac address.
 */
static int
ixgbe_addmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot, i;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	if (ixgbe->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	/*
	 * The first ixgbe->num_rx_groups slots are reserved, one for each
	 * respective group. The remaining slots are shared by all groups.
	 * When adding a MAC address, the group's reserved slot is checked
	 * first, then the shared slots are searched.
	 */
	slot = -1;
	if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
		for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
			if (ixgbe->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else {
		slot = rx_group->index;
	}

	if (slot == -1) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
	    rx_group->index, IXGBE_RAH_AV);
	ixgbe->unicst_addr[slot].mac.set = 1;
	ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
	ixgbe->unicst_avail--;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
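/*
 * Example of the slot layout above: with 4 rx groups and a
 * hypothetical unicst_total of 16, slots 0-3 are reserved for groups
 * 0-3 and slots 4-15 are shared. The first address added to group 1
 * takes reserved slot 1; a second address for group 1 takes the
 * lowest free shared slot, i.e. slot 4 if none is in use yet.
 */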
/*
 * Remove a mac address.
 */
static int
ixgbe_remmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	slot = ixgbe_unicst_find(ixgbe, mac_addr);
	if (slot == -1) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	if (ixgbe->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_clear_rar(hw, slot);
	ixgbe->unicst_addr[slot].mac.set = 0;
	ixgbe->unicst_avail++;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}