/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);

/* Sysctls */
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}
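/*
 * For illustration only: with the format string above, the line printed at
 * attach time looks like the following (all values hypothetical):
 *
 *   ixl0: fw 6.0.48442 api 1.7 nvm 6.01 etid 80003554 oem 1.262.0
 */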
/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify the state of FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else {
		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
			fw_mode = IXL_FW_MODE_UEMPR;
	}
	return (fw_mode);
}

/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success, or an error code on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/* Check FW mode again in case it has changed while
	 * waiting for reset to complete */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}

/**
 * ixl_setup_hmc - Setup LAN Host Memory Cache
 * @pf: PF structure
 *
 * Init and configure LAN Host Memory Cache
 *
 * @returns 0 on success, EIO on error
 */
int
ixl_setup_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	return (0);
}
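/*
 * Descriptive note (not from the original sources): the LAN Host Memory
 * Cache gives the device host-DRAM backing for its Tx/Rx queue contexts,
 * so it must be initialized above before any queues are configured and
 * torn down again on detach (see ixl_shutdown_hmc() below).
 */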
/**
 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
 * @pf: PF structure
 *
 * Shutdown Host Memory Cache if configured.
 */
void
ixl_shutdown_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/* HMC not configured, no need to shutdown */
	if (hw->hmc.hmc_obj == NULL)
		return;

	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(pf->dev,
		    "Shutdown LAN HMC failed with code %s\n",
		    i40e_stat_str(hw, status));
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		hw->func_caps.iwarp = 0;
		return (0);
	}

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_IXL);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case IXL_I2C_ACCESS_METHOD_AQ:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Keep link active by default */
	atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
		    __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

/*
** Creates a new filter with the given MAC address and VLAN ID
*/
static struct ixl_mac_filter *
ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	/* create a new empty filter */
	f = malloc(sizeof(struct ixl_mac_filter),
	    M_IXL, M_NOWAIT | M_ZERO);
	if (f) {
		LIST_INSERT_HEAD(headp, f, ftle);
		bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
		f->vlan = vlan;
	}

	return (f);
}
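/*
 * Descriptive note: the allocation above uses M_NOWAIT, so it can fail
 * under memory pressure; every caller must therefore tolerate a NULL
 * return (the callers below all check for it).
 */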
485 */ 486 void 487 ixl_free_filters(struct ixl_ftl_head *headp) 488 { 489 struct ixl_mac_filter *f, *nf; 490 491 f = LIST_FIRST(headp); 492 while (f != NULL) { 493 nf = LIST_NEXT(f, ftle); 494 free(f, M_IXL); 495 f = nf; 496 } 497 498 LIST_INIT(headp); 499 } 500 501 static u_int 502 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 503 { 504 struct ixl_add_maddr_arg *ama = arg; 505 struct ixl_vsi *vsi = ama->vsi; 506 const u8 *macaddr = (u8*)LLADDR(sdl); 507 struct ixl_mac_filter *f; 508 509 /* Does one already exist */ 510 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY); 511 if (f != NULL) 512 return (0); 513 514 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY); 515 if (f == NULL) { 516 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 517 return (0); 518 } 519 f->flags |= IXL_FILTER_MC; 520 521 return (1); 522 } 523 524 /********************************************************************* 525 * Filter Routines 526 * 527 * Routines for multicast and vlan filter management. 528 * 529 *********************************************************************/ 530 void 531 ixl_add_multi(struct ixl_vsi *vsi) 532 { 533 if_t ifp = vsi->ifp; 534 struct i40e_hw *hw = vsi->hw; 535 int mcnt = 0; 536 struct ixl_add_maddr_arg cb_arg; 537 538 IOCTL_DEBUGOUT("ixl_add_multi: begin"); 539 540 mcnt = if_llmaddr_count(ifp); 541 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { 542 i40e_aq_set_vsi_multicast_promiscuous(hw, 543 vsi->seid, TRUE, NULL); 544 /* delete all existing MC filters */ 545 ixl_del_multi(vsi, true); 546 return; 547 } 548 549 cb_arg.vsi = vsi; 550 LIST_INIT(&cb_arg.to_add); 551 552 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg); 553 if (mcnt > 0) 554 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt); 555 556 IOCTL_DEBUGOUT("ixl_add_multi: end"); 557 } 558 559 static u_int 560 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 561 { 562 struct ixl_mac_filter *f = arg; 563 564 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl))) 565 return (1); 566 else 567 return (0); 568 } 569 570 void 571 ixl_del_multi(struct ixl_vsi *vsi, bool all) 572 { 573 struct ixl_ftl_head to_del; 574 if_t ifp = vsi->ifp; 575 struct ixl_mac_filter *f, *fn; 576 int mcnt = 0; 577 578 IOCTL_DEBUGOUT("ixl_del_multi: begin"); 579 580 LIST_INIT(&to_del); 581 /* Search for removed multicast addresses */ 582 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) { 583 if ((f->flags & IXL_FILTER_MC) == 0 || 584 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0))) 585 continue; 586 587 LIST_REMOVE(f, ftle); 588 LIST_INSERT_HEAD(&to_del, f, ftle); 589 mcnt++; 590 } 591 592 if (mcnt > 0) 593 ixl_del_hw_filters(vsi, &to_del, mcnt); 594 } 595 596 void 597 ixl_link_up_msg(struct ixl_pf *pf) 598 { 599 struct i40e_hw *hw = &pf->hw; 600 if_t ifp = pf->vsi.ifp; 601 char *req_fec_string, *neg_fec_string; 602 u8 fec_abilities; 603 604 fec_abilities = hw->phy.link_info.req_fec_info; 605 /* If both RS and KR are requested, only show RS */ 606 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) 607 req_fec_string = ixl_fec_string[0]; 608 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) 609 req_fec_string = ixl_fec_string[1]; 610 else 611 req_fec_string = ixl_fec_string[2]; 612 613 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) 614 neg_fec_string = ixl_fec_string[0]; 615 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) 616 neg_fec_string = ixl_fec_string[1]; 617 else 618 neg_fec_string = ixl_fec_string[2]; 619 620 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested 
void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	if_t ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    if_name(ifp),
	    ixl_link_speed_string(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	    hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
	    ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

void
ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}
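/*
 * The entries added above surface as ifmedia words that userland can list
 * and select with ifconfig(8), e.g. "ifconfig ixl0 media 40Gbase-SR4" for
 * IFM_40G_SR4 (illustrative; the available words depend on what PHY types
 * the firmware reports).
 */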
/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8 aq_buf[I40E_AQ_LARGE_BUF];
	int ret;
	u16 next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}

void
ixl_vsi_add_sysctls(struct ixl_vsi *vsi, const char *sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	/* Copy of netstat RX errors counter for validation purposes */
	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
	    CTLFLAG_RD, &vsi->ierrors,
	    "RX packet errors");

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}
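/*
 * Illustrative usage, assuming the handler above is attached under the
 * device's sysctl tree as "tx_itr" (the attach point lives elsewhere in
 * the driver, and the OID name here is an assumption):
 *
 *   # sysctl dev.ixl.0.tx_itr=122
 *
 * The value is the raw ITR register value and must lie in [0, IXL_MAX_ITR].
 */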
/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}
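/*
 * Both ITR sysctls above take the raw register value; per the comment in
 * ixl_configure_intr0_msix(), the hardware interprets ITR in 2-usec units,
 * so e.g. a value of 62 (0x3E) corresponds to ~124 usecs.
 */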
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0, 0, 0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *)&rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}

/*
 * Configure enabled PCTYPES for RSS.
 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64 set_hena = 0, hena;

#ifdef RSS
	u32 rss_hash_config;

	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}
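/*
 * HENA is a single 64-bit enable mask split across the two 32-bit
 * I40E_PFQF_HENA() registers; each bit position corresponds to one packet
 * classifier type (PCTYPE), which is why the code above ORs the new bits
 * into the combined 64-bit value before writing both halves back.
 */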
/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}

/*
 * In some firmware versions there is a default MAC/VLAN filter
 * configured that interferes with the filters managed by the driver.
 * Make sure it's removed.
 */
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
	struct i40e_aqc_remove_macvlan_element_data e;

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
	    I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	LIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	if (IXL_VSI_IS_VF(vsi))
		return;

	ixl_del_default_hw_filters(vsi);

	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}

void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_ftl_head tmp;
	int cnt;

	/*
	 * The ixl_add_hw_filters function adds filters configured
	 * in HW to a list in VSI. Move all filters to a temporary
	 * list to avoid corrupting it by concatenating to itself.
	 */
	LIST_INIT(&tmp);
	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
	cnt = vsi->num_hw_filters;
	vsi->num_hw_filters = 0;

	ixl_add_hw_filters(vsi, &tmp, cnt);

	/*
	 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
	 * will be NULL. Furthermore, the ftl of such vsi already contains
	 * IXL_VLAN_ANY filter so we can skip that as well.
	 */
	if (hw == NULL)
		return;

	/* Filter could be removed if MAC address was changed */
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * VLAN HW filtering is enabled, make sure that filters
	 * for all registered VLAN tags are configured
	 */
	ixl_add_vlan_filters(vsi, hw->mac.addr);
}
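/*
 * Descriptive note (assumption about the call sites): ixl_reconfigure_filters()
 * is the re-programming path used when the HW filter table has been cleared,
 * e.g. after a device reset, while the SW list in vsi->ftl is still valid.
 */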
/*
 * This routine adds a MAC/VLAN filter to the software filter
 * list, then programs that new filter into the HW, unless a
 * matching filter already exists in the SW filter list.
 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_pf *pf;
	device_t dev;
	struct ixl_ftl_head to_add;
	int to_add_cnt;

	pf = vsi->back;
	dev = pf->dev;
	to_add_cnt = 1;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist? */
	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f != NULL)
		return;

	LIST_INIT(&to_add);
	f = ixl_new_filter(&to_add, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/*
	** If this is the first vlan being registered, we need to
	** remove the ANY filter that indicates we are not in a vlan,
	** and replace it with a VLAN-0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			struct ixl_ftl_head to_del;

			/* Prepare the new filter first to avoid removing
			 * the VLAN_ANY filter if allocation fails */
			f = ixl_new_filter(&to_add, macaddr, 0);
			if (f == NULL) {
				device_printf(dev, "WARNING: no filter available!!\n");
				free(LIST_FIRST(&to_add), M_IXL);
				return;
			}
			to_add_cnt++;

			LIST_REMOVE(tmp, ftle);
			LIST_INIT(&to_del);
			LIST_INSERT_HEAD(&to_del, tmp, ftle);
			ixl_del_hw_filters(vsi, &to_del, 1);
		}
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}
/**
 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
 * @vsi: pointer to VSI
 * @macaddr: MAC address
 *
 * Adds a MAC/VLAN filter for each VLAN configured on the interface
 * if there are enough HW filters. Otherwise adds a single filter
 * for all tagged and untagged frames to allow all configured VLANs
 * to receive traffic.
 */
void
ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_ftl_head to_add;
	struct ixl_mac_filter *f;
	int to_add_cnt = 0;
	int i, vlan = 0;

	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
		return;
	}
	LIST_INIT(&to_add);

	/* Add filter for untagged frames if it does not exist yet */
	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (f == NULL) {
		f = ixl_new_filter(&to_add, macaddr, 0);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			return;
		}
		to_add_cnt++;
	}

	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
		if (vlan == -1)
			break;

		/* Does one already exist? */
		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
		if (f != NULL)
			continue;

		f = ixl_new_filter(&to_add, macaddr, vlan);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			ixl_free_filters(&to_add);
			return;
		}
		to_add_cnt++;
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head ftl_head;
	int to_del_cnt = 1;

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f == NULL)
		return;

	LIST_REMOVE(f, ftle);
	LIST_INIT(&ftl_head);
	LIST_INSERT_HEAD(&ftl_head, f, ftle);
	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
		vsi->num_macs--;

	/* If this is not the last vlan just remove the filter */
	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
		return;
	}

	/* It's the last vlan, we need to switch back to a non-vlan filter */
	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (tmp != NULL) {
		LIST_REMOVE(tmp, ftle);
		LIST_INSERT_AFTER(f, tmp, ftle);
		to_del_cnt++;
	}
	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);

	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
}
/**
 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
 * @vsi: VSI which filters need to be removed
 * @macaddr: MAC address
 *
 * Removes all MAC/VLAN filters with a given MAC address. For multicast
 * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY)
 * so skip them to speed up processing. Those filters should be removed
 * using the ixl_del_filter function.
 */
void
ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head to_del;
	int to_del_cnt = 0;

	LIST_INIT(&to_del);

	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
		if ((f->flags & IXL_FILTER_MC) != 0 ||
		    !ixl_ether_is_equal(f->macaddr, macaddr))
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
	if (to_del_cnt > 0)
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
}

/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	LIST_FOREACH(f, headp, ftle) {
		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
		    (f->vlan == vlan)) {
			return (f);
		}
	}

	return (NULL);
}
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter *f, *fn;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	LIST_FOREACH(f, to_add, ftle) {
		b = &a[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
		if (f->vlan == IXL_VLAN_ANY) {
			b->vlan_tag = 0;
			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
		} else {
			b->vlan_tag = f->vlan;
			b->flags = 0;
		}
		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		if (++j == cnt)
			break;
	}
	if (j != cnt) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: list of filters too short; expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_add);
		goto out_free;
	}

	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
	if (status == I40E_SUCCESS) {
		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
		vsi->num_hw_filters += j;
		goto out_free;
	}

	device_printf(dev,
	    "i40e_aq_add_macvlan status %s, error %s\n",
	    i40e_stat_str(hw, status),
	    i40e_aq_str(hw, hw->aq.asq_last_status));
	j = 0;

	/* Verify which filters were actually configured in HW
	 * and add them to the list */
	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
		LIST_REMOVE(f, ftle);
		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
			ixl_dbg_filter(pf,
			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
			    __func__,
			    MAC_FORMAT_ARGS(f->macaddr),
			    f->vlan);
			free(f, M_IXL);
		} else {
			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
			vsi->num_hw_filters++;
		}
		j++;
	}

out_free:
	free(a, M_IXL);
}

/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	struct ixl_mac_filter *f, *f_temp;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
		e = &d[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		if (f->vlan == IXL_VLAN_ANY) {
			e->vlan_tag = 0;
			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
		} else {
			e->vlan_tag = f->vlan;
		}

		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		/* delete entry from the list */
		LIST_REMOVE(f, ftle);
		free(f, M_IXL);
		if (++j == cnt)
			break;
	}
	if (j != cnt || !LIST_EMPTY(to_del)) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_del);
		goto out_free;
	}
	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		for (int i = 0; i < j; i++) {
			if (d[i].error_code == 0)
				continue;
			device_printf(dev,
			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
			    d[i].vlan_tag);
		}
	}

	vsi->num_hw_filters -= j;

out_free:
	free(d, M_IXL);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
}

int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}
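/*
 * The enable handshake above (set QENA_REQ, then poll QENA_STAT for up to
 * ~100 usecs) is mirrored for the Rx side below; both paths report
 * ETIMEDOUT if the hardware never acknowledges the request.
 */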
int
ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
	    I40E_QRX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "RX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
	i40e_usec_delay(500);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "TX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}
/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "RX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}
"TX Malicious Driver Detection event (unknown)\n"); 1772 } 1773 1774 static void 1775 ixl_handle_rx_mdd_event(struct ixl_pf *pf) 1776 { 1777 struct i40e_hw *hw = &pf->hw; 1778 device_t dev = pf->dev; 1779 struct ixl_vf *vf; 1780 bool mdd_detected = false; 1781 bool pf_mdd_detected = false; 1782 bool vf_mdd_detected = false; 1783 u16 queue; 1784 u8 pf_num, event; 1785 u8 pf_mdet_num, vp_mdet_num; 1786 u32 reg; 1787 1788 /* 1789 * GL_MDET_RX doesn't contain VF number information, unlike 1790 * GL_MDET_TX. 1791 */ 1792 reg = rd32(hw, I40E_GL_MDET_RX); 1793 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 1794 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 1795 I40E_GL_MDET_RX_FUNCTION_SHIFT; 1796 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 1797 I40E_GL_MDET_RX_EVENT_SHIFT; 1798 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 1799 I40E_GL_MDET_RX_QUEUE_SHIFT; 1800 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 1801 mdd_detected = true; 1802 } 1803 1804 if (!mdd_detected) 1805 return; 1806 1807 reg = rd32(hw, I40E_PF_MDET_RX); 1808 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 1809 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 1810 pf_mdet_num = hw->pf_id; 1811 pf_mdd_detected = true; 1812 } 1813 1814 /* Check if MDD was caused by a VF */ 1815 for (int i = 0; i < pf->num_vfs; i++) { 1816 vf = &(pf->vfs[i]); 1817 reg = rd32(hw, I40E_VP_MDET_RX(i)); 1818 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 1819 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 1820 vp_mdet_num = i; 1821 vf->num_mdd_events++; 1822 vf_mdd_detected = true; 1823 } 1824 } 1825 1826 /* Print out an error message */ 1827 if (vf_mdd_detected && pf_mdd_detected) 1828 device_printf(dev, 1829 "Malicious Driver Detection event %d" 1830 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n", 1831 event, queue, pf_num, pf_mdet_num, vp_mdet_num); 1832 else if (vf_mdd_detected && !pf_mdd_detected) 1833 device_printf(dev, 1834 "Malicious Driver Detection event %d" 1835 " on RX queue %d, pf number %d, (VF-%d)\n", 1836 event, queue, pf_num, vp_mdet_num); 1837 else if (!vf_mdd_detected && pf_mdd_detected) 1838 device_printf(dev, 1839 "Malicious Driver Detection event %d" 1840 " on RX queue %d, pf number %d (PF-%d)\n", 1841 event, queue, pf_num, pf_mdet_num); 1842 /* Theoretically shouldn't happen */ 1843 else 1844 device_printf(dev, 1845 "RX Malicious Driver Detection event (unknown)\n"); 1846 } 1847 1848 /** 1849 * ixl_handle_mdd_event 1850 * 1851 * Called from interrupt handler to identify possibly malicious vfs 1852 * (But also detects events from the PF, as well) 1853 **/ 1854 void 1855 ixl_handle_mdd_event(struct ixl_pf *pf) 1856 { 1857 struct i40e_hw *hw = &pf->hw; 1858 u32 reg; 1859 1860 /* 1861 * Handle both TX/RX because it's possible they could 1862 * both trigger in the same interrupt. 
1863 */ 1864 ixl_handle_tx_mdd_event(pf); 1865 ixl_handle_rx_mdd_event(pf); 1866 1867 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING); 1868 1869 /* re-enable mdd interrupt cause */ 1870 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 1871 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 1872 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 1873 ixl_flush(hw); 1874 } 1875 1876 void 1877 ixl_enable_intr0(struct i40e_hw *hw) 1878 { 1879 u32 reg; 1880 1881 /* Use IXL_ITR_NONE so ITR isn't updated here */ 1882 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | 1883 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 1884 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 1885 wr32(hw, I40E_PFINT_DYN_CTL0, reg); 1886 } 1887 1888 void 1889 ixl_disable_intr0(struct i40e_hw *hw) 1890 { 1891 u32 reg; 1892 1893 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; 1894 wr32(hw, I40E_PFINT_DYN_CTL0, reg); 1895 ixl_flush(hw); 1896 } 1897 1898 void 1899 ixl_enable_queue(struct i40e_hw *hw, int id) 1900 { 1901 u32 reg; 1902 1903 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | 1904 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 1905 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 1906 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); 1907 } 1908 1909 void 1910 ixl_disable_queue(struct i40e_hw *hw, int id) 1911 { 1912 u32 reg; 1913 1914 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 1915 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); 1916 } 1917 1918 void 1919 ixl_handle_empr_reset(struct ixl_pf *pf) 1920 { 1921 struct ixl_vsi *vsi = &pf->vsi; 1922 bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING); 1923 1924 ixl_prepare_for_reset(pf, is_up); 1925 /* 1926 * i40e_pf_reset checks the type of reset and acts 1927 * accordingly. If EMP or Core reset was performed 1928 * doing PF reset is not necessary and it sometimes 1929 * fails. 1930 */ 1931 ixl_pf_reset(pf); 1932 1933 if (!IXL_PF_IN_RECOVERY_MODE(pf) && 1934 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) { 1935 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); 1936 device_printf(pf->dev, 1937 "Firmware recovery mode detected. Limiting functionality. 
Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 1938 pf->link_up = FALSE; 1939 ixl_update_link_status(pf); 1940 } 1941 1942 ixl_rebuild_hw_structs_after_reset(pf, is_up); 1943 1944 atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING); 1945 } 1946 1947 void 1948 ixl_update_stats_counters(struct ixl_pf *pf) 1949 { 1950 struct i40e_hw *hw = &pf->hw; 1951 struct ixl_vsi *vsi = &pf->vsi; 1952 struct ixl_vf *vf; 1953 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx; 1954 1955 struct i40e_hw_port_stats *nsd = &pf->stats; 1956 struct i40e_hw_port_stats *osd = &pf->stats_offsets; 1957 1958 /* Update hw stats */ 1959 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), 1960 pf->stat_offsets_loaded, 1961 &osd->crc_errors, &nsd->crc_errors); 1962 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), 1963 pf->stat_offsets_loaded, 1964 &osd->illegal_bytes, &nsd->illegal_bytes); 1965 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), 1966 I40E_GLPRT_GORCL(hw->port), 1967 pf->stat_offsets_loaded, 1968 &osd->eth.rx_bytes, &nsd->eth.rx_bytes); 1969 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), 1970 I40E_GLPRT_GOTCL(hw->port), 1971 pf->stat_offsets_loaded, 1972 &osd->eth.tx_bytes, &nsd->eth.tx_bytes); 1973 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), 1974 pf->stat_offsets_loaded, 1975 &osd->eth.rx_discards, 1976 &nsd->eth.rx_discards); 1977 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), 1978 I40E_GLPRT_UPRCL(hw->port), 1979 pf->stat_offsets_loaded, 1980 &osd->eth.rx_unicast, 1981 &nsd->eth.rx_unicast); 1982 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), 1983 I40E_GLPRT_UPTCL(hw->port), 1984 pf->stat_offsets_loaded, 1985 &osd->eth.tx_unicast, 1986 &nsd->eth.tx_unicast); 1987 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), 1988 I40E_GLPRT_MPRCL(hw->port), 1989 pf->stat_offsets_loaded, 1990 &osd->eth.rx_multicast, 1991 &nsd->eth.rx_multicast); 1992 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), 1993 I40E_GLPRT_MPTCL(hw->port), 1994 pf->stat_offsets_loaded, 1995 &osd->eth.tx_multicast, 1996 &nsd->eth.tx_multicast); 1997 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), 1998 I40E_GLPRT_BPRCL(hw->port), 1999 pf->stat_offsets_loaded, 2000 &osd->eth.rx_broadcast, 2001 &nsd->eth.rx_broadcast); 2002 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), 2003 I40E_GLPRT_BPTCL(hw->port), 2004 pf->stat_offsets_loaded, 2005 &osd->eth.tx_broadcast, 2006 &nsd->eth.tx_broadcast); 2007 2008 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), 2009 pf->stat_offsets_loaded, 2010 &osd->tx_dropped_link_down, 2011 &nsd->tx_dropped_link_down); 2012 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), 2013 pf->stat_offsets_loaded, 2014 &osd->mac_local_faults, 2015 &nsd->mac_local_faults); 2016 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), 2017 pf->stat_offsets_loaded, 2018 &osd->mac_remote_faults, 2019 &nsd->mac_remote_faults); 2020 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), 2021 pf->stat_offsets_loaded, 2022 &osd->rx_length_errors, 2023 &nsd->rx_length_errors); 2024 2025 /* Flow control (LFC) stats */ 2026 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), 2027 pf->stat_offsets_loaded, 2028 &osd->link_xon_rx, &nsd->link_xon_rx); 2029 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), 2030 pf->stat_offsets_loaded, 2031 &osd->link_xon_tx, &nsd->link_xon_tx); 2032 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), 2033 pf->stat_offsets_loaded, 2034 &osd->link_xoff_rx, &nsd->link_xoff_rx); 2035 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), 2036 
pf->stat_offsets_loaded, 2037 &osd->link_xoff_tx, &nsd->link_xoff_tx); 2038 2039 /* 2040 * For watchdog management we need to know if we have been paused 2041 * during the last interval, so capture that here. 2042 */ 2043 if (pf->stats.link_xoff_rx != prev_link_xoff_rx) 2044 vsi->shared->isc_pause_frames = 1; 2045 2046 /* Packet size stats rx */ 2047 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 2048 I40E_GLPRT_PRC64L(hw->port), 2049 pf->stat_offsets_loaded, 2050 &osd->rx_size_64, &nsd->rx_size_64); 2051 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), 2052 I40E_GLPRT_PRC127L(hw->port), 2053 pf->stat_offsets_loaded, 2054 &osd->rx_size_127, &nsd->rx_size_127); 2055 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), 2056 I40E_GLPRT_PRC255L(hw->port), 2057 pf->stat_offsets_loaded, 2058 &osd->rx_size_255, &nsd->rx_size_255); 2059 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), 2060 I40E_GLPRT_PRC511L(hw->port), 2061 pf->stat_offsets_loaded, 2062 &osd->rx_size_511, &nsd->rx_size_511); 2063 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), 2064 I40E_GLPRT_PRC1023L(hw->port), 2065 pf->stat_offsets_loaded, 2066 &osd->rx_size_1023, &nsd->rx_size_1023); 2067 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), 2068 I40E_GLPRT_PRC1522L(hw->port), 2069 pf->stat_offsets_loaded, 2070 &osd->rx_size_1522, &nsd->rx_size_1522); 2071 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), 2072 I40E_GLPRT_PRC9522L(hw->port), 2073 pf->stat_offsets_loaded, 2074 &osd->rx_size_big, &nsd->rx_size_big); 2075 2076 /* Packet size stats tx */ 2077 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), 2078 I40E_GLPRT_PTC64L(hw->port), 2079 pf->stat_offsets_loaded, 2080 &osd->tx_size_64, &nsd->tx_size_64); 2081 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), 2082 I40E_GLPRT_PTC127L(hw->port), 2083 pf->stat_offsets_loaded, 2084 &osd->tx_size_127, &nsd->tx_size_127); 2085 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), 2086 I40E_GLPRT_PTC255L(hw->port), 2087 pf->stat_offsets_loaded, 2088 &osd->tx_size_255, &nsd->tx_size_255); 2089 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), 2090 I40E_GLPRT_PTC511L(hw->port), 2091 pf->stat_offsets_loaded, 2092 &osd->tx_size_511, &nsd->tx_size_511); 2093 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), 2094 I40E_GLPRT_PTC1023L(hw->port), 2095 pf->stat_offsets_loaded, 2096 &osd->tx_size_1023, &nsd->tx_size_1023); 2097 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), 2098 I40E_GLPRT_PTC1522L(hw->port), 2099 pf->stat_offsets_loaded, 2100 &osd->tx_size_1522, &nsd->tx_size_1522); 2101 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), 2102 I40E_GLPRT_PTC9522L(hw->port), 2103 pf->stat_offsets_loaded, 2104 &osd->tx_size_big, &nsd->tx_size_big); 2105 2106 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), 2107 pf->stat_offsets_loaded, 2108 &osd->rx_undersize, &nsd->rx_undersize); 2109 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), 2110 pf->stat_offsets_loaded, 2111 &osd->rx_fragments, &nsd->rx_fragments); 2112 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), 2113 pf->stat_offsets_loaded, 2114 &osd->rx_oversize, &nsd->rx_oversize); 2115 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 2116 pf->stat_offsets_loaded, 2117 &osd->rx_jabber, &nsd->rx_jabber); 2118 /* EEE */ 2119 i40e_get_phy_lpi_status(hw, nsd); 2120 2121 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded, 2122 &osd->tx_lpi_count, &nsd->tx_lpi_count, 2123 &osd->rx_lpi_count, &nsd->rx_lpi_count); 2124 2125 pf->stat_offsets_loaded = true; 2126 /* End hw stats */ 2127 2128 /* Update vsi stats */ 2129 
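	/* (This updates the PF's own VSI; eth stats for each enabled
	 * VF VSI are refreshed in the loop below.) */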
ixl_update_vsi_stats(vsi); 2130 2131 for (int i = 0; i < pf->num_vfs; i++) { 2132 vf = &pf->vfs[i]; 2133 if (vf->vf_flags & VF_FLAG_ENABLED) 2134 ixl_update_eth_stats(&pf->vfs[i].vsi); 2135 } 2136 } 2137 2138 /** 2139 * Update VSI-specific ethernet statistics counters. 2140 **/ 2141 void 2142 ixl_update_eth_stats(struct ixl_vsi *vsi) 2143 { 2144 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 2145 struct i40e_hw *hw = &pf->hw; 2146 struct i40e_eth_stats *es; 2147 struct i40e_eth_stats *oes; 2148 u16 stat_idx = vsi->info.stat_counter_idx; 2149 2150 es = &vsi->eth_stats; 2151 oes = &vsi->eth_stats_offsets; 2152 2153 /* Gather up the stats that the hw collects */ 2154 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), 2155 vsi->stat_offsets_loaded, 2156 &oes->tx_errors, &es->tx_errors); 2157 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 2158 vsi->stat_offsets_loaded, 2159 &oes->rx_discards, &es->rx_discards); 2160 2161 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), 2162 I40E_GLV_GORCL(stat_idx), 2163 vsi->stat_offsets_loaded, 2164 &oes->rx_bytes, &es->rx_bytes); 2165 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), 2166 I40E_GLV_UPRCL(stat_idx), 2167 vsi->stat_offsets_loaded, 2168 &oes->rx_unicast, &es->rx_unicast); 2169 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), 2170 I40E_GLV_MPRCL(stat_idx), 2171 vsi->stat_offsets_loaded, 2172 &oes->rx_multicast, &es->rx_multicast); 2173 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), 2174 I40E_GLV_BPRCL(stat_idx), 2175 vsi->stat_offsets_loaded, 2176 &oes->rx_broadcast, &es->rx_broadcast); 2177 2178 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), 2179 I40E_GLV_GOTCL(stat_idx), 2180 vsi->stat_offsets_loaded, 2181 &oes->tx_bytes, &es->tx_bytes); 2182 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), 2183 I40E_GLV_UPTCL(stat_idx), 2184 vsi->stat_offsets_loaded, 2185 &oes->tx_unicast, &es->tx_unicast); 2186 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), 2187 I40E_GLV_MPTCL(stat_idx), 2188 vsi->stat_offsets_loaded, 2189 &oes->tx_multicast, &es->tx_multicast); 2190 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), 2191 I40E_GLV_BPTCL(stat_idx), 2192 vsi->stat_offsets_loaded, 2193 &oes->tx_broadcast, &es->tx_broadcast); 2194 vsi->stat_offsets_loaded = true; 2195 } 2196 2197 void 2198 ixl_update_vsi_stats(struct ixl_vsi *vsi) 2199 { 2200 struct ixl_pf *pf; 2201 struct i40e_eth_stats *es; 2202 u64 tx_discards, csum_errs; 2203 2204 struct i40e_hw_port_stats *nsd; 2205 2206 pf = vsi->back; 2207 es = &vsi->eth_stats; 2208 nsd = &pf->stats; 2209 2210 ixl_update_eth_stats(vsi); 2211 2212 tx_discards = es->tx_discards + nsd->tx_dropped_link_down; 2213 2214 csum_errs = 0; 2215 for (int i = 0; i < vsi->num_rx_queues; i++) 2216 csum_errs += vsi->rx_queues[i].rxr.csum_errs; 2217 nsd->checksum_error = csum_errs; 2218 2219 /* Update ifnet stats */ 2220 IXL_SET_IPACKETS(vsi, es->rx_unicast + 2221 es->rx_multicast + 2222 es->rx_broadcast); 2223 IXL_SET_OPACKETS(vsi, es->tx_unicast + 2224 es->tx_multicast + 2225 es->tx_broadcast); 2226 IXL_SET_IBYTES(vsi, es->rx_bytes); 2227 IXL_SET_OBYTES(vsi, es->tx_bytes); 2228 IXL_SET_IMCASTS(vsi, es->rx_multicast); 2229 IXL_SET_OMCASTS(vsi, es->tx_multicast); 2230 2231 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + 2232 nsd->checksum_error + nsd->rx_length_errors + 2233 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize + 2234 nsd->rx_jabber); 2235 IXL_SET_OERRORS(vsi, es->tx_errors); 2236 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); 2237 IXL_SET_OQDROPS(vsi, tx_discards); 2238 IXL_SET_NOPROTO(vsi, 
	    es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);
}

/**
 * Reset all of the stats for the given PF
 **/
void
ixl_pf_reset_stats(struct ixl_pf *pf)
{
	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
	pf->stat_offsets_loaded = false;
}

/**
 * Reset all of the stats for the given VSI
 **/
void
ixl_vsi_reset_stats(struct ixl_vsi *vsi)
{
	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
	vsi->stat_offsets_loaded = false;
}

/**
 * Read and update a 48-bit stat from the hw
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 *
 * The counters wrap at 48 bits; e.g. an offset of 0xFFFFFFFFFFF0 and a
 * later raw read of 0x10 are reported as 0x20.
 **/
void
ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
    bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	/*
	 * hireg is unused: the high half of the counter sits in the
	 * adjacent register, so a single rd64() of the low register
	 * reads the entire value.
	 */
	new_data = rd64(hw, loreg);

	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * Read and update a 32-bit stat from the hw
 **/
void
ixl_stat_update32(struct i40e_hw *hw, u32 reg,
    bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * Add subset of device sysctls safe to use in recovery mode
 */
void
ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	struct sysctl_oid *debug_node;
	struct sysctl_oid_list *debug_list;

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_show_fw, "A", "Firmware version");

	/* Add sysctls meant to print debug information, but don't list them
	 * in "sysctl -a" output.
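	 * CTLFLAG_SKIP only hides the node from listings; the OIDs can
	 * still be read by full name, e.g. (assuming unit 0):
	 *   sysctl dev.ixl.0.debug.dump_debug_data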
*/ 2329 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2330 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2331 "Debug Sysctls"); 2332 debug_list = SYSCTL_CHILDREN(debug_node); 2333 2334 SYSCTL_ADD_UINT(ctx, debug_list, 2335 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2336 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2337 2338 SYSCTL_ADD_UINT(ctx, debug_list, 2339 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2340 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2341 2342 SYSCTL_ADD_PROC(ctx, debug_list, 2343 OID_AUTO, "dump_debug_data", 2344 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2345 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2346 2347 SYSCTL_ADD_PROC(ctx, debug_list, 2348 OID_AUTO, "do_pf_reset", 2349 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2350 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2351 2352 SYSCTL_ADD_PROC(ctx, debug_list, 2353 OID_AUTO, "do_core_reset", 2354 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2355 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2356 2357 SYSCTL_ADD_PROC(ctx, debug_list, 2358 OID_AUTO, "do_global_reset", 2359 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2360 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2361 2362 SYSCTL_ADD_PROC(ctx, debug_list, 2363 OID_AUTO, "queue_interrupt_table", 2364 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2365 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2366 } 2367 2368 void 2369 ixl_add_device_sysctls(struct ixl_pf *pf) 2370 { 2371 device_t dev = pf->dev; 2372 struct i40e_hw *hw = &pf->hw; 2373 2374 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2375 struct sysctl_oid_list *ctx_list = 2376 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2377 2378 struct sysctl_oid *debug_node; 2379 struct sysctl_oid_list *debug_list; 2380 2381 struct sysctl_oid *fec_node; 2382 struct sysctl_oid_list *fec_list; 2383 struct sysctl_oid *eee_node; 2384 struct sysctl_oid_list *eee_list; 2385 2386 /* Set up sysctls */ 2387 SYSCTL_ADD_PROC(ctx, ctx_list, 2388 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2389 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 2390 2391 SYSCTL_ADD_PROC(ctx, ctx_list, 2392 OID_AUTO, "advertise_speed", 2393 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2394 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); 2395 2396 SYSCTL_ADD_PROC(ctx, ctx_list, 2397 OID_AUTO, "supported_speeds", 2398 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2399 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); 2400 2401 SYSCTL_ADD_PROC(ctx, ctx_list, 2402 OID_AUTO, "current_speed", 2403 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2404 ixl_sysctl_current_speed, "A", "Current Port Speed"); 2405 2406 SYSCTL_ADD_PROC(ctx, ctx_list, 2407 OID_AUTO, "fw_version", 2408 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2409 ixl_sysctl_show_fw, "A", "Firmware version"); 2410 2411 SYSCTL_ADD_PROC(ctx, ctx_list, 2412 OID_AUTO, "unallocated_queues", 2413 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2414 ixl_sysctl_unallocated_queues, "I", 2415 "Queues not allocated to a PF or VF"); 2416 2417 SYSCTL_ADD_PROC(ctx, ctx_list, 2418 OID_AUTO, "tx_itr", 2419 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2420 ixl_sysctl_pf_tx_itr, "I", 2421 "Immediately set TX ITR value for all queues"); 2422 2423 SYSCTL_ADD_PROC(ctx, ctx_list, 2424 OID_AUTO, 
"rx_itr", 2425 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2426 ixl_sysctl_pf_rx_itr, "I", 2427 "Immediately set RX ITR value for all queues"); 2428 2429 SYSCTL_ADD_INT(ctx, ctx_list, 2430 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 2431 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 2432 2433 SYSCTL_ADD_INT(ctx, ctx_list, 2434 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 2435 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 2436 2437 /* Add FEC sysctls for 25G adapters */ 2438 if (i40e_is_25G_device(hw->device_id)) { 2439 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2440 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2441 "FEC Sysctls"); 2442 fec_list = SYSCTL_CHILDREN(fec_node); 2443 2444 SYSCTL_ADD_PROC(ctx, fec_list, 2445 OID_AUTO, "fc_ability", 2446 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2447 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 2448 2449 SYSCTL_ADD_PROC(ctx, fec_list, 2450 OID_AUTO, "rs_ability", 2451 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2452 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 2453 2454 SYSCTL_ADD_PROC(ctx, fec_list, 2455 OID_AUTO, "fc_requested", 2456 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2457 ixl_sysctl_fec_fc_request, "I", 2458 "FC FEC mode requested on link"); 2459 2460 SYSCTL_ADD_PROC(ctx, fec_list, 2461 OID_AUTO, "rs_requested", 2462 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2463 ixl_sysctl_fec_rs_request, "I", 2464 "RS FEC mode requested on link"); 2465 2466 SYSCTL_ADD_PROC(ctx, fec_list, 2467 OID_AUTO, "auto_fec_enabled", 2468 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2469 ixl_sysctl_fec_auto_enable, "I", 2470 "Let FW decide FEC ability/request modes"); 2471 } 2472 2473 SYSCTL_ADD_PROC(ctx, ctx_list, 2474 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2475 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 2476 2477 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2478 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2479 "Energy Efficient Ethernet (EEE) Sysctls"); 2480 eee_list = SYSCTL_CHILDREN(eee_node); 2481 2482 SYSCTL_ADD_PROC(ctx, eee_list, 2483 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2484 pf, 0, ixl_sysctl_eee_enable, "I", 2485 "Enable Energy Efficient Ethernet (EEE)"); 2486 2487 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", 2488 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0, 2489 "TX LPI status"); 2490 2491 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", 2492 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0, 2493 "RX LPI status"); 2494 2495 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", 2496 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count, 2497 "TX LPI count"); 2498 2499 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", 2500 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count, 2501 "RX LPI count"); 2502 2503 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, 2504 "link_active_on_if_down", 2505 CTLTYPE_INT | CTLFLAG_RWTUN, 2506 pf, 0, ixl_sysctl_set_link_active, "I", 2507 IXL_SYSCTL_HELP_SET_LINK_ACTIVE); 2508 2509 /* Add sysctls meant to print debug information, but don't list them 2510 * in "sysctl -a" output. 
*/ 2511 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2512 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2513 "Debug Sysctls"); 2514 debug_list = SYSCTL_CHILDREN(debug_node); 2515 2516 SYSCTL_ADD_UINT(ctx, debug_list, 2517 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2518 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2519 2520 SYSCTL_ADD_UINT(ctx, debug_list, 2521 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2522 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2523 2524 SYSCTL_ADD_PROC(ctx, debug_list, 2525 OID_AUTO, "link_status", 2526 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2527 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); 2528 2529 SYSCTL_ADD_PROC(ctx, debug_list, 2530 OID_AUTO, "phy_abilities_init", 2531 CTLTYPE_STRING | CTLFLAG_RD, 2532 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities"); 2533 2534 SYSCTL_ADD_PROC(ctx, debug_list, 2535 OID_AUTO, "phy_abilities", 2536 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2537 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); 2538 2539 SYSCTL_ADD_PROC(ctx, debug_list, 2540 OID_AUTO, "filter_list", 2541 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2542 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); 2543 2544 SYSCTL_ADD_PROC(ctx, debug_list, 2545 OID_AUTO, "hw_res_alloc", 2546 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2547 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); 2548 2549 SYSCTL_ADD_PROC(ctx, debug_list, 2550 OID_AUTO, "switch_config", 2551 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2552 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); 2553 2554 SYSCTL_ADD_PROC(ctx, debug_list, 2555 OID_AUTO, "switch_vlans", 2556 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2557 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration"); 2558 2559 SYSCTL_ADD_PROC(ctx, debug_list, 2560 OID_AUTO, "rss_key", 2561 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2562 pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); 2563 2564 SYSCTL_ADD_PROC(ctx, debug_list, 2565 OID_AUTO, "rss_lut", 2566 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2567 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); 2568 2569 SYSCTL_ADD_PROC(ctx, debug_list, 2570 OID_AUTO, "rss_hena", 2571 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2572 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); 2573 2574 SYSCTL_ADD_PROC(ctx, debug_list, 2575 OID_AUTO, "disable_fw_link_management", 2576 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2577 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); 2578 2579 SYSCTL_ADD_PROC(ctx, debug_list, 2580 OID_AUTO, "dump_debug_data", 2581 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2582 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2583 2584 SYSCTL_ADD_PROC(ctx, debug_list, 2585 OID_AUTO, "do_pf_reset", 2586 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2587 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2588 2589 SYSCTL_ADD_PROC(ctx, debug_list, 2590 OID_AUTO, "do_core_reset", 2591 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2592 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2593 2594 SYSCTL_ADD_PROC(ctx, debug_list, 2595 OID_AUTO, "do_global_reset", 2596 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2597 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2598 2599 SYSCTL_ADD_PROC(ctx, debug_list, 2600 OID_AUTO, "queue_interrupt_table", 2601 
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2602 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2603 2604 if (pf->has_i2c) { 2605 SYSCTL_ADD_PROC(ctx, debug_list, 2606 OID_AUTO, "read_i2c_byte", 2607 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2608 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); 2609 2610 SYSCTL_ADD_PROC(ctx, debug_list, 2611 OID_AUTO, "write_i2c_byte", 2612 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2613 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); 2614 2615 SYSCTL_ADD_PROC(ctx, debug_list, 2616 OID_AUTO, "read_i2c_diag_data", 2617 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2618 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); 2619 } 2620 } 2621 2622 /* 2623 * Primarily for finding out how many queues can be assigned to VFs, 2624 * at runtime. 2625 */ 2626 static int 2627 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) 2628 { 2629 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2630 int queues; 2631 2632 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); 2633 2634 return sysctl_handle_int(oidp, NULL, queues, req); 2635 } 2636 2637 static const char * 2638 ixl_link_speed_string(enum i40e_aq_link_speed link_speed) 2639 { 2640 const char * link_speed_str[] = { 2641 "Unknown", 2642 "100 Mbps", 2643 "1 Gbps", 2644 "10 Gbps", 2645 "40 Gbps", 2646 "20 Gbps", 2647 "25 Gbps", 2648 "2.5 Gbps", 2649 "5 Gbps" 2650 }; 2651 int index; 2652 2653 switch (link_speed) { 2654 case I40E_LINK_SPEED_100MB: 2655 index = 1; 2656 break; 2657 case I40E_LINK_SPEED_1GB: 2658 index = 2; 2659 break; 2660 case I40E_LINK_SPEED_10GB: 2661 index = 3; 2662 break; 2663 case I40E_LINK_SPEED_40GB: 2664 index = 4; 2665 break; 2666 case I40E_LINK_SPEED_20GB: 2667 index = 5; 2668 break; 2669 case I40E_LINK_SPEED_25GB: 2670 index = 6; 2671 break; 2672 case I40E_LINK_SPEED_2_5GB: 2673 index = 7; 2674 break; 2675 case I40E_LINK_SPEED_5GB: 2676 index = 8; 2677 break; 2678 case I40E_LINK_SPEED_UNKNOWN: 2679 default: 2680 index = 0; 2681 break; 2682 } 2683 2684 return (link_speed_str[index]); 2685 } 2686 2687 int 2688 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2689 { 2690 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2691 struct i40e_hw *hw = &pf->hw; 2692 int error = 0; 2693 2694 ixl_update_link_status(pf); 2695 2696 error = sysctl_handle_string(oidp, 2697 __DECONST(void *, 2698 ixl_link_speed_string(hw->phy.link_info.link_speed)), 2699 8, req); 2700 2701 return (error); 2702 } 2703 2704 /* 2705 * Converts 8-bit speeds value to and from sysctl flags and 2706 * Admin Queue flags. 2707 */ 2708 static u8 2709 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) 2710 { 2711 #define SPEED_MAP_SIZE 8 2712 static u16 speedmap[SPEED_MAP_SIZE] = { 2713 (I40E_LINK_SPEED_100MB | (0x1 << 8)), 2714 (I40E_LINK_SPEED_1GB | (0x2 << 8)), 2715 (I40E_LINK_SPEED_10GB | (0x4 << 8)), 2716 (I40E_LINK_SPEED_20GB | (0x8 << 8)), 2717 (I40E_LINK_SPEED_25GB | (0x10 << 8)), 2718 (I40E_LINK_SPEED_40GB | (0x20 << 8)), 2719 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)), 2720 (I40E_LINK_SPEED_5GB | (0x80 << 8)), 2721 }; 2722 u8 retval = 0; 2723 2724 for (int i = 0; i < SPEED_MAP_SIZE; i++) { 2725 if (to_aq) 2726 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; 2727 else 2728 retval |= (speeds & speedmap[i]) ? 
(speedmap[i] >> 8) : 0; 2729 } 2730 2731 return (retval); 2732 } 2733 2734 int 2735 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) 2736 { 2737 struct i40e_hw *hw = &pf->hw; 2738 device_t dev = pf->dev; 2739 struct i40e_aq_get_phy_abilities_resp abilities; 2740 struct i40e_aq_set_phy_config config; 2741 enum i40e_status_code aq_error = 0; 2742 2743 /* Get current capability information */ 2744 aq_error = i40e_aq_get_phy_capabilities(hw, 2745 FALSE, FALSE, &abilities, NULL); 2746 if (aq_error) { 2747 device_printf(dev, 2748 "%s: Error getting phy capabilities %d," 2749 " aq error: %d\n", __func__, aq_error, 2750 hw->aq.asq_last_status); 2751 return (EIO); 2752 } 2753 2754 /* Prepare new config */ 2755 bzero(&config, sizeof(config)); 2756 if (from_aq) 2757 config.link_speed = speeds; 2758 else 2759 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); 2760 config.phy_type = abilities.phy_type; 2761 config.phy_type_ext = abilities.phy_type_ext; 2762 config.abilities = abilities.abilities 2763 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 2764 config.eee_capability = abilities.eee_capability; 2765 config.eeer = abilities.eeer_val; 2766 config.low_power_ctrl = abilities.d3_lpan; 2767 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 2768 & I40E_AQ_PHY_FEC_CONFIG_MASK; 2769 2770 /* Do aq command & restart link */ 2771 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 2772 if (aq_error) { 2773 device_printf(dev, 2774 "%s: Error setting new phy config %d," 2775 " aq error: %d\n", __func__, aq_error, 2776 hw->aq.asq_last_status); 2777 return (EIO); 2778 } 2779 2780 return (0); 2781 } 2782 2783 /* 2784 ** Supported link speeds 2785 ** Flags: 2786 ** 0x1 - 100 Mb 2787 ** 0x2 - 1G 2788 ** 0x4 - 10G 2789 ** 0x8 - 20G 2790 ** 0x10 - 25G 2791 ** 0x20 - 40G 2792 ** 0x40 - 2.5G 2793 ** 0x80 - 5G 2794 */ 2795 static int 2796 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) 2797 { 2798 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2799 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 2800 2801 return sysctl_handle_int(oidp, NULL, supported, req); 2802 } 2803 2804 /* 2805 ** Control link advertise speed: 2806 ** Flags: 2807 ** 0x1 - advertise 100 Mb 2808 ** 0x2 - advertise 1G 2809 ** 0x4 - advertise 10G 2810 ** 0x8 - advertise 20G 2811 ** 0x10 - advertise 25G 2812 ** 0x20 - advertise 40G 2813 ** 0x40 - advertise 2.5G 2814 ** 0x80 - advertise 5G 2815 ** 2816 ** Set to 0 to disable link 2817 */ 2818 int 2819 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) 2820 { 2821 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2822 device_t dev = pf->dev; 2823 u8 converted_speeds; 2824 int requested_ls = 0; 2825 int error = 0; 2826 2827 /* Read in new mode */ 2828 requested_ls = pf->advertised_speed; 2829 error = sysctl_handle_int(oidp, &requested_ls, 0, req); 2830 if ((error) || (req->newptr == NULL)) 2831 return (error); 2832 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 2833 device_printf(dev, "Interface is currently in FW recovery mode. 
" 2834 "Setting advertise speed not supported\n"); 2835 return (EINVAL); 2836 } 2837 2838 /* Error out if bits outside of possible flag range are set */ 2839 if ((requested_ls & ~((u8)0xFF)) != 0) { 2840 device_printf(dev, "Input advertised speed out of range; " 2841 "valid flags are: 0x%02x\n", 2842 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2843 return (EINVAL); 2844 } 2845 2846 /* Check if adapter supports input value */ 2847 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); 2848 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { 2849 device_printf(dev, "Invalid advertised speed; " 2850 "valid flags are: 0x%02x\n", 2851 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2852 return (EINVAL); 2853 } 2854 2855 error = ixl_set_advertised_speeds(pf, requested_ls, false); 2856 if (error) 2857 return (error); 2858 2859 pf->advertised_speed = requested_ls; 2860 ixl_update_link_status(pf); 2861 return (0); 2862 } 2863 2864 /* 2865 * Input: bitmap of enum i40e_aq_link_speed 2866 */ 2867 u64 2868 ixl_max_aq_speed_to_value(u8 link_speeds) 2869 { 2870 if (link_speeds & I40E_LINK_SPEED_40GB) 2871 return IF_Gbps(40); 2872 if (link_speeds & I40E_LINK_SPEED_25GB) 2873 return IF_Gbps(25); 2874 if (link_speeds & I40E_LINK_SPEED_20GB) 2875 return IF_Gbps(20); 2876 if (link_speeds & I40E_LINK_SPEED_10GB) 2877 return IF_Gbps(10); 2878 if (link_speeds & I40E_LINK_SPEED_5GB) 2879 return IF_Gbps(5); 2880 if (link_speeds & I40E_LINK_SPEED_2_5GB) 2881 return IF_Mbps(2500); 2882 if (link_speeds & I40E_LINK_SPEED_1GB) 2883 return IF_Gbps(1); 2884 if (link_speeds & I40E_LINK_SPEED_100MB) 2885 return IF_Mbps(100); 2886 else 2887 /* Minimum supported link speed */ 2888 return IF_Mbps(100); 2889 } 2890 2891 /* 2892 ** Get the width and transaction speed of 2893 ** the bus this adapter is plugged into. 2894 */ 2895 void 2896 ixl_get_bus_info(struct ixl_pf *pf) 2897 { 2898 struct i40e_hw *hw = &pf->hw; 2899 device_t dev = pf->dev; 2900 u16 link; 2901 u32 offset, num_ports; 2902 u64 max_speed; 2903 2904 /* Some devices don't use PCIE */ 2905 if (hw->mac.type == I40E_MAC_X722) 2906 return; 2907 2908 /* Read PCI Express Capabilities Link Status Register */ 2909 pci_find_cap(dev, PCIY_EXPRESS, &offset); 2910 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2911 2912 /* Fill out hw struct with PCIE info */ 2913 i40e_set_pci_config_data(hw, link); 2914 2915 /* Use info to print out bandwidth messages */ 2916 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 2917 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 2918 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 2919 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 2920 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 2921 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 2922 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 2923 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 2924 ("Unknown")); 2925 2926 /* 2927 * If adapter is in slot with maximum supported speed, 2928 * no warning message needs to be printed out. 
2929 */ 2930 if (hw->bus.speed >= i40e_bus_speed_8000 2931 && hw->bus.width >= i40e_bus_width_pcie_x8) 2932 return; 2933 2934 num_ports = bitcount32(hw->func_caps.valid_functions); 2935 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; 2936 2937 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { 2938 device_printf(dev, "PCI-Express bandwidth available" 2939 " for this device may be insufficient for" 2940 " optimal performance.\n"); 2941 device_printf(dev, "Please move the device to a different" 2942 " PCI-e link with more lanes and/or higher" 2943 " transfer rate.\n"); 2944 } 2945 } 2946 2947 static int 2948 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2949 { 2950 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2951 struct i40e_hw *hw = &pf->hw; 2952 struct sbuf *sbuf; 2953 2954 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2955 ixl_nvm_version_str(hw, sbuf); 2956 sbuf_finish(sbuf); 2957 sbuf_delete(sbuf); 2958 2959 return (0); 2960 } 2961 2962 void 2963 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) 2964 { 2965 u8 nvma_ptr = nvma->config & 0xFF; 2966 u8 nvma_flags = (nvma->config & 0xF00) >> 8; 2967 const char * cmd_str; 2968 2969 switch (nvma->command) { 2970 case I40E_NVM_READ: 2971 if (nvma_ptr == 0xF && nvma_flags == 0xF && 2972 nvma->offset == 0 && nvma->data_size == 1) { 2973 device_printf(dev, "NVMUPD: Get Driver Status Command\n"); 2974 return; 2975 } 2976 cmd_str = "READ "; 2977 break; 2978 case I40E_NVM_WRITE: 2979 cmd_str = "WRITE"; 2980 break; 2981 default: 2982 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command); 2983 return; 2984 } 2985 device_printf(dev, 2986 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n", 2987 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size); 2988 } 2989 2990 int 2991 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) 2992 { 2993 struct i40e_hw *hw = &pf->hw; 2994 struct i40e_nvm_access *nvma; 2995 device_t dev = pf->dev; 2996 enum i40e_status_code status = 0; 2997 size_t nvma_size, ifd_len, exp_len; 2998 int err, perrno; 2999 3000 DEBUGFUNC("ixl_handle_nvmupd_cmd"); 3001 3002 /* Sanity checks */ 3003 nvma_size = sizeof(struct i40e_nvm_access); 3004 ifd_len = ifd->ifd_len; 3005 3006 if (ifd_len < nvma_size || 3007 ifd->ifd_data == NULL) { 3008 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", 3009 __func__); 3010 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", 3011 __func__, ifd_len, nvma_size); 3012 device_printf(dev, "%s: data pointer: %p\n", __func__, 3013 ifd->ifd_data); 3014 return (EINVAL); 3015 } 3016 3017 nvma = malloc(ifd_len, M_IXL, M_WAITOK); 3018 err = copyin(ifd->ifd_data, nvma, ifd_len); 3019 if (err) { 3020 device_printf(dev, "%s: Cannot get request from user space\n", 3021 __func__); 3022 free(nvma, M_IXL); 3023 return (err); 3024 } 3025 3026 if (pf->dbg_mask & IXL_DBG_NVMUPD) 3027 ixl_print_nvm_cmd(dev, nvma); 3028 3029 if (IXL_PF_IS_RESETTING(pf)) { 3030 int count = 0; 3031 while (count++ < 100) { 3032 i40e_msec_delay(100); 3033 if (!(IXL_PF_IS_RESETTING(pf))) 3034 break; 3035 } 3036 } 3037 3038 if (IXL_PF_IS_RESETTING(pf)) { 3039 device_printf(dev, 3040 "%s: timeout waiting for EMP reset to finish\n", 3041 __func__); 3042 free(nvma, M_IXL); 3043 return (-EBUSY); 3044 } 3045 3046 if (nvma->data_size < 1 || nvma->data_size > 4096) { 3047 device_printf(dev, 3048 "%s: invalid request, data size not in supported range\n", 3049 __func__); 3050 free(nvma, M_IXL); 3051 return 
(EINVAL); 3052 } 3053 3054 /* 3055 * Older versions of the NVM update tool don't set ifd_len to the size 3056 * of the entire buffer passed to the ioctl. Check the data_size field 3057 * in the contained i40e_nvm_access struct and ensure everything is 3058 * copied in from userspace. 3059 */ 3060 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ 3061 3062 if (ifd_len < exp_len) { 3063 ifd_len = exp_len; 3064 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK); 3065 err = copyin(ifd->ifd_data, nvma, ifd_len); 3066 if (err) { 3067 device_printf(dev, "%s: Cannot get request from user space\n", 3068 __func__); 3069 free(nvma, M_IXL); 3070 return (err); 3071 } 3072 } 3073 3074 // TODO: Might need a different lock here 3075 // IXL_PF_LOCK(pf); 3076 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); 3077 // IXL_PF_UNLOCK(pf); 3078 3079 err = copyout(nvma, ifd->ifd_data, ifd_len); 3080 free(nvma, M_IXL); 3081 if (err) { 3082 device_printf(dev, "%s: Cannot return data to user space\n", 3083 __func__); 3084 return (err); 3085 } 3086 3087 /* Let the nvmupdate report errors, show them only when debug is enabled */ 3088 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) 3089 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", 3090 i40e_stat_str(hw, status), perrno); 3091 3092 /* 3093 * -EPERM is actually ERESTART, which the kernel interprets as it needing 3094 * to run this ioctl again. So use -EACCES for -EPERM instead. 3095 */ 3096 if (perrno == -EPERM) 3097 return (-EACCES); 3098 else 3099 return (perrno); 3100 } 3101 3102 int 3103 ixl_find_i2c_interface(struct ixl_pf *pf) 3104 { 3105 struct i40e_hw *hw = &pf->hw; 3106 bool i2c_en, port_matched; 3107 u32 reg; 3108 3109 for (int i = 0; i < 4; i++) { 3110 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); 3111 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); 3112 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) 3113 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) 3114 & BIT(hw->port); 3115 if (i2c_en && port_matched) 3116 return (i); 3117 } 3118 3119 return (-1); 3120 } 3121 3122 void 3123 ixl_set_link(struct ixl_pf *pf, bool enable) 3124 { 3125 struct i40e_hw *hw = &pf->hw; 3126 device_t dev = pf->dev; 3127 struct i40e_aq_get_phy_abilities_resp abilities; 3128 struct i40e_aq_set_phy_config config; 3129 enum i40e_status_code aq_error = 0; 3130 u32 phy_type, phy_type_ext; 3131 3132 /* Get initial capability information */ 3133 aq_error = i40e_aq_get_phy_capabilities(hw, 3134 FALSE, TRUE, &abilities, NULL); 3135 if (aq_error) { 3136 device_printf(dev, 3137 "%s: Error getting phy capabilities %d," 3138 " aq error: %d\n", __func__, aq_error, 3139 hw->aq.asq_last_status); 3140 return; 3141 } 3142 3143 phy_type = abilities.phy_type; 3144 phy_type_ext = abilities.phy_type_ext; 3145 3146 /* Get current capability information */ 3147 aq_error = i40e_aq_get_phy_capabilities(hw, 3148 FALSE, FALSE, &abilities, NULL); 3149 if (aq_error) { 3150 device_printf(dev, 3151 "%s: Error getting phy capabilities %d," 3152 " aq error: %d\n", __func__, aq_error, 3153 hw->aq.asq_last_status); 3154 return; 3155 } 3156 3157 /* Prepare new config */ 3158 memset(&config, 0, sizeof(config)); 3159 config.link_speed = abilities.link_speed; 3160 config.abilities = abilities.abilities; 3161 config.eee_capability = abilities.eee_capability; 3162 config.eeer = abilities.eeer_val; 3163 config.low_power_ctrl = abilities.d3_lpan; 3164 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 3165 & 
	    I40E_AQ_PHY_FEC_CONFIG_MASK;
	config.phy_type = 0;
	config.phy_type_ext = 0;

	config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
	    I40E_AQ_PHY_FLAG_PAUSE_RX);

	switch (pf->fc) {
	case I40E_FC_FULL:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
		    I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	if (enable) {
		config.phy_type = phy_type;
		config.phy_type_ext = phy_type_ext;
	}

	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}

	aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting link config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}
}

static char *
ixl_phy_type_string(u32 bit_pos, bool ext)
{
	static char * phy_types_str[32] = {
		"SGMII",
		"1000BASE-KX",
		"10GBASE-KX4",
		"10GBASE-KR",
		"40GBASE-KR4",
		"XAUI",
		"XFI",
		"SFI",
		"XLAUI",
		"XLPPI",
		"40GBASE-CR4",
		"10GBASE-CR1",
		"SFP+ Active DA",
		"QSFP+ Active DA",
		"Reserved (14)",
		"Reserved (15)",
		"Reserved (16)",
		"100BASE-TX",
		"1000BASE-T",
		"10GBASE-T",
		"10GBASE-SR",
		"10GBASE-LR",
		"10GBASE-SFP+Cu",
		"10GBASE-CR1",
		"40GBASE-CR4",
		"40GBASE-SR4",
		"40GBASE-LR4",
		"1000BASE-SX",
		"1000BASE-LX",
		"1000BASE-T Optical",
		"20GBASE-KR2",
		"Reserved (31)"
	};
	static char * ext_phy_types_str[8] = {
		"25GBASE-KR",
		"25GBASE-CR",
		"25GBASE-SR",
		"25GBASE-LR",
		"25GBASE-AOC",
		"25GBASE-ACC",
		"2.5GBASE-T",
		"5GBASE-T"
	};

	if (ext && bit_pos > 7) return "Invalid_Ext";
	if (bit_pos > 31) return "Invalid";

	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
}

/* TODO: ERJ: I don't think this is necessary anymore.
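 * The shared code's i40e_aq_get_link_info() retrieves much the same
 * information and could likely replace it.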
*/ 3267 int 3268 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) 3269 { 3270 device_t dev = pf->dev; 3271 struct i40e_hw *hw = &pf->hw; 3272 struct i40e_aq_desc desc; 3273 enum i40e_status_code status; 3274 3275 struct i40e_aqc_get_link_status *aq_link_status = 3276 (struct i40e_aqc_get_link_status *)&desc.params.raw; 3277 3278 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 3279 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); 3280 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 3281 if (status) { 3282 device_printf(dev, 3283 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", 3284 __func__, i40e_stat_str(hw, status), 3285 i40e_aq_str(hw, hw->aq.asq_last_status)); 3286 return (EIO); 3287 } 3288 3289 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); 3290 return (0); 3291 } 3292 3293 static char * 3294 ixl_phy_type_string_ls(u8 val) 3295 { 3296 if (val >= 0x1F) 3297 return ixl_phy_type_string(val - 0x1F, true); 3298 else 3299 return ixl_phy_type_string(val, false); 3300 } 3301 3302 static int 3303 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) 3304 { 3305 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3306 device_t dev = pf->dev; 3307 struct sbuf *buf; 3308 int error = 0; 3309 3310 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3311 if (!buf) { 3312 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3313 return (ENOMEM); 3314 } 3315 3316 struct i40e_aqc_get_link_status link_status; 3317 error = ixl_aq_get_link_status(pf, &link_status); 3318 if (error) { 3319 sbuf_delete(buf); 3320 return (error); 3321 } 3322 3323 sbuf_printf(buf, "\n" 3324 "PHY Type : 0x%02x<%s>\n" 3325 "Speed : 0x%02x\n" 3326 "Link info: 0x%02x\n" 3327 "AN info : 0x%02x\n" 3328 "Ext info : 0x%02x\n" 3329 "Loopback : 0x%02x\n" 3330 "Max Frame: %d\n" 3331 "Config : 0x%02x\n" 3332 "Power : 0x%02x", 3333 link_status.phy_type, 3334 ixl_phy_type_string_ls(link_status.phy_type), 3335 link_status.link_speed, 3336 link_status.link_info, 3337 link_status.an_info, 3338 link_status.ext_info, 3339 link_status.loopback, 3340 link_status.max_frame_size, 3341 link_status.config, 3342 link_status.power_desc); 3343 3344 error = sbuf_finish(buf); 3345 if (error) 3346 device_printf(dev, "Error finishing sbuf: %d\n", error); 3347 3348 sbuf_delete(buf); 3349 return (error); 3350 } 3351 3352 static int 3353 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 3354 { 3355 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3356 struct i40e_hw *hw = &pf->hw; 3357 device_t dev = pf->dev; 3358 enum i40e_status_code status; 3359 struct i40e_aq_get_phy_abilities_resp abilities; 3360 struct sbuf *buf; 3361 int error = 0; 3362 3363 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3364 if (!buf) { 3365 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3366 return (ENOMEM); 3367 } 3368 3369 status = i40e_aq_get_phy_capabilities(hw, 3370 FALSE, arg2 != 0, &abilities, NULL); 3371 if (status) { 3372 device_printf(dev, 3373 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 3374 __func__, i40e_stat_str(hw, status), 3375 i40e_aq_str(hw, hw->aq.asq_last_status)); 3376 sbuf_delete(buf); 3377 return (EIO); 3378 } 3379 3380 sbuf_printf(buf, "\n" 3381 "PHY Type : %08x", 3382 abilities.phy_type); 3383 3384 if (abilities.phy_type != 0) { 3385 sbuf_printf(buf, "<"); 3386 for (int i = 0; i < 32; i++) 3387 if ((1 << i) & abilities.phy_type) 3388 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); 3389 sbuf_printf(buf, 
">"); 3390 } 3391 3392 sbuf_printf(buf, "\nPHY Ext : %02x", 3393 abilities.phy_type_ext); 3394 3395 if (abilities.phy_type_ext != 0) { 3396 sbuf_printf(buf, "<"); 3397 for (int i = 0; i < 4; i++) 3398 if ((1 << i) & abilities.phy_type_ext) 3399 sbuf_printf(buf, "%s,", 3400 ixl_phy_type_string(i, true)); 3401 sbuf_printf(buf, ">"); 3402 } 3403 3404 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 3405 if (abilities.link_speed != 0) { 3406 u8 link_speed; 3407 sbuf_printf(buf, " <"); 3408 for (int i = 0; i < 8; i++) { 3409 link_speed = (1 << i) & abilities.link_speed; 3410 if (link_speed) 3411 sbuf_printf(buf, "%s, ", 3412 ixl_link_speed_string(link_speed)); 3413 } 3414 sbuf_printf(buf, ">"); 3415 } 3416 3417 sbuf_printf(buf, "\n" 3418 "Abilities: %02x\n" 3419 "EEE cap : %04x\n" 3420 "EEER reg : %08x\n" 3421 "D3 Lpan : %02x\n" 3422 "ID : %02x %02x %02x %02x\n" 3423 "ModType : %02x %02x %02x\n" 3424 "ModType E: %01x\n" 3425 "FEC Cfg : %02x\n" 3426 "Ext CC : %02x", 3427 abilities.abilities, abilities.eee_capability, 3428 abilities.eeer_val, abilities.d3_lpan, 3429 abilities.phy_id[0], abilities.phy_id[1], 3430 abilities.phy_id[2], abilities.phy_id[3], 3431 abilities.module_type[0], abilities.module_type[1], 3432 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 3433 abilities.fec_cfg_curr_mod_ext_info & 0x1F, 3434 abilities.ext_comp_code); 3435 3436 error = sbuf_finish(buf); 3437 if (error) 3438 device_printf(dev, "Error finishing sbuf: %d\n", error); 3439 3440 sbuf_delete(buf); 3441 return (error); 3442 } 3443 3444 static int 3445 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 3446 { 3447 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3448 struct ixl_vsi *vsi = &pf->vsi; 3449 struct ixl_mac_filter *f; 3450 device_t dev = pf->dev; 3451 int error = 0, ftl_len = 0, ftl_counter = 0; 3452 3453 struct sbuf *buf; 3454 3455 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3456 if (!buf) { 3457 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3458 return (ENOMEM); 3459 } 3460 3461 sbuf_printf(buf, "\n"); 3462 3463 /* Print MAC filters */ 3464 sbuf_printf(buf, "PF Filters:\n"); 3465 LIST_FOREACH(f, &vsi->ftl, ftle) 3466 ftl_len++; 3467 3468 if (ftl_len < 1) 3469 sbuf_printf(buf, "(none)\n"); 3470 else { 3471 LIST_FOREACH(f, &vsi->ftl, ftle) { 3472 sbuf_printf(buf, 3473 MAC_FORMAT ", vlan %4d, flags %#06x", 3474 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3475 /* don't print '\n' for last entry */ 3476 if (++ftl_counter != ftl_len) 3477 sbuf_printf(buf, "\n"); 3478 } 3479 } 3480 3481 #ifdef PCI_IOV 3482 /* TODO: Give each VF its own filter list sysctl */ 3483 struct ixl_vf *vf; 3484 if (pf->num_vfs > 0) { 3485 sbuf_printf(buf, "\n\n"); 3486 for (int i = 0; i < pf->num_vfs; i++) { 3487 vf = &pf->vfs[i]; 3488 if (!(vf->vf_flags & VF_FLAG_ENABLED)) 3489 continue; 3490 3491 vsi = &vf->vsi; 3492 ftl_len = 0, ftl_counter = 0; 3493 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num); 3494 LIST_FOREACH(f, &vsi->ftl, ftle) 3495 ftl_len++; 3496 3497 if (ftl_len < 1) 3498 sbuf_printf(buf, "(none)\n"); 3499 else { 3500 LIST_FOREACH(f, &vsi->ftl, ftle) { 3501 sbuf_printf(buf, 3502 MAC_FORMAT ", vlan %4d, flags %#06x\n", 3503 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3504 } 3505 } 3506 } 3507 } 3508 #endif 3509 3510 error = sbuf_finish(buf); 3511 if (error) 3512 device_printf(dev, "Error finishing sbuf: %d\n", error); 3513 sbuf_delete(buf); 3514 3515 return (error); 3516 } 3517 3518 #define IXL_SW_RES_SIZE 0x14 3519 int 3520 ixl_res_alloc_cmp(const void *a, 
const void *b) 3521 { 3522 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; 3523 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; 3524 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; 3525 3526 return ((int)one->resource_type - (int)two->resource_type); 3527 } 3528 3529 /* 3530 * Longest string length: 25 3531 */ 3532 const char * 3533 ixl_switch_res_type_string(u8 type) 3534 { 3535 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = { 3536 "VEB", 3537 "VSI", 3538 "Perfect Match MAC address", 3539 "S-tag", 3540 "(Reserved)", 3541 "Multicast hash entry", 3542 "Unicast hash entry", 3543 "VLAN", 3544 "VSI List entry", 3545 "(Reserved)", 3546 "VLAN Statistic Pool", 3547 "Mirror Rule", 3548 "Queue Set", 3549 "Inner VLAN Forward filter", 3550 "(Reserved)", 3551 "Inner MAC", 3552 "IP", 3553 "GRE/VN1 Key", 3554 "VN2 Key", 3555 "Tunneling Port" 3556 }; 3557 3558 if (type < IXL_SW_RES_SIZE) 3559 return ixl_switch_res_type_strings[type]; 3560 else 3561 return "(Reserved)"; 3562 } 3563 3564 static int 3565 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) 3566 { 3567 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3568 struct i40e_hw *hw = &pf->hw; 3569 device_t dev = pf->dev; 3570 struct sbuf *buf; 3571 enum i40e_status_code status; 3572 int error = 0; 3573 3574 u8 num_entries; 3575 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; 3576 3577 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3578 if (!buf) { 3579 device_printf(dev, "Could not allocate sbuf for output.\n"); 3580 return (ENOMEM); 3581 } 3582 3583 bzero(resp, sizeof(resp)); 3584 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, 3585 resp, 3586 IXL_SW_RES_SIZE, 3587 NULL); 3588 if (status) { 3589 device_printf(dev, 3590 "%s: get_switch_resource_alloc() error %s, aq error %s\n", 3591 __func__, i40e_stat_str(hw, status), 3592 i40e_aq_str(hw, hw->aq.asq_last_status)); 3593 sbuf_delete(buf); 3594 return (error); 3595 } 3596 3597 /* Sort entries by type for display */ 3598 qsort(resp, num_entries, 3599 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), 3600 &ixl_res_alloc_cmp); 3601 3602 sbuf_cat(buf, "\n"); 3603 sbuf_printf(buf, "# of entries: %d\n", num_entries); 3604 sbuf_printf(buf, 3605 " Type | Guaranteed | Total | Used | Un-allocated\n" 3606 " | (this) | (all) | (this) | (all) \n"); 3607 for (int i = 0; i < num_entries; i++) { 3608 sbuf_printf(buf, 3609 "%25s | %10d %5d %6d %12d", 3610 ixl_switch_res_type_string(resp[i].resource_type), 3611 resp[i].guaranteed, 3612 resp[i].total, 3613 resp[i].used, 3614 resp[i].total_unalloced); 3615 if (i < num_entries - 1) 3616 sbuf_cat(buf, "\n"); 3617 } 3618 3619 error = sbuf_finish(buf); 3620 if (error) 3621 device_printf(dev, "Error finishing sbuf: %d\n", error); 3622 3623 sbuf_delete(buf); 3624 return (error); 3625 } 3626 3627 enum ixl_sw_seid_offset { 3628 IXL_SW_SEID_EMP = 1, 3629 IXL_SW_SEID_MAC_START = 2, 3630 IXL_SW_SEID_MAC_END = 5, 3631 IXL_SW_SEID_PF_START = 16, 3632 IXL_SW_SEID_PF_END = 31, 3633 IXL_SW_SEID_VF_START = 32, 3634 IXL_SW_SEID_VF_END = 159, 3635 }; 3636 3637 /* 3638 * Caller must init and delete sbuf; this function will clear and 3639 * finish it for caller. 3640 * 3641 * Note: The SEID argument only applies for elements defined by FW at 3642 * power-on; these include the EMP, Ports, PFs and VFs. 
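 * For example, SEID 2 is rendered as "MAC  0", and SEID 18, which
 * falls in the PF range, as "PF   2".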
3643 */ 3644 static char * 3645 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid) 3646 { 3647 sbuf_clear(s); 3648 3649 /* If SEID is in certain ranges, then we can infer the 3650 * mapping of SEID to switch element. 3651 */ 3652 if (seid == IXL_SW_SEID_EMP) { 3653 sbuf_cat(s, "EMP"); 3654 goto out; 3655 } else if (seid >= IXL_SW_SEID_MAC_START && 3656 seid <= IXL_SW_SEID_MAC_END) { 3657 sbuf_printf(s, "MAC %2d", 3658 seid - IXL_SW_SEID_MAC_START); 3659 goto out; 3660 } else if (seid >= IXL_SW_SEID_PF_START && 3661 seid <= IXL_SW_SEID_PF_END) { 3662 sbuf_printf(s, "PF %3d", 3663 seid - IXL_SW_SEID_PF_START); 3664 goto out; 3665 } else if (seid >= IXL_SW_SEID_VF_START && 3666 seid <= IXL_SW_SEID_VF_END) { 3667 sbuf_printf(s, "VF %3d", 3668 seid - IXL_SW_SEID_VF_START); 3669 goto out; 3670 } 3671 3672 switch (element_type) { 3673 case I40E_AQ_SW_ELEM_TYPE_BMC: 3674 sbuf_cat(s, "BMC"); 3675 break; 3676 case I40E_AQ_SW_ELEM_TYPE_PV: 3677 sbuf_cat(s, "PV"); 3678 break; 3679 case I40E_AQ_SW_ELEM_TYPE_VEB: 3680 sbuf_cat(s, "VEB"); 3681 break; 3682 case I40E_AQ_SW_ELEM_TYPE_PA: 3683 sbuf_cat(s, "PA"); 3684 break; 3685 case I40E_AQ_SW_ELEM_TYPE_VSI: 3686 sbuf_printf(s, "VSI"); 3687 break; 3688 default: 3689 sbuf_cat(s, "?"); 3690 break; 3691 } 3692 3693 out: 3694 sbuf_finish(s); 3695 return sbuf_data(s); 3696 } 3697 3698 static int 3699 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b) 3700 { 3701 const struct i40e_aqc_switch_config_element_resp *one, *two; 3702 one = (const struct i40e_aqc_switch_config_element_resp *)a; 3703 two = (const struct i40e_aqc_switch_config_element_resp *)b; 3704 3705 return ((int)one->seid - (int)two->seid); 3706 } 3707 3708 static int 3709 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) 3710 { 3711 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3712 struct i40e_hw *hw = &pf->hw; 3713 device_t dev = pf->dev; 3714 struct sbuf *buf; 3715 struct sbuf *nmbuf; 3716 enum i40e_status_code status; 3717 int error = 0; 3718 u16 next = 0; 3719 u8 aq_buf[I40E_AQ_LARGE_BUF]; 3720 3721 struct i40e_aqc_switch_config_element_resp *elem; 3722 struct i40e_aqc_get_switch_config_resp *sw_config; 3723 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 3724 3725 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3726 if (!buf) { 3727 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3728 return (ENOMEM); 3729 } 3730 3731 status = i40e_aq_get_switch_config(hw, sw_config, 3732 sizeof(aq_buf), &next, NULL); 3733 if (status) { 3734 device_printf(dev, 3735 "%s: aq_get_switch_config() error %s, aq error %s\n", 3736 __func__, i40e_stat_str(hw, status), 3737 i40e_aq_str(hw, hw->aq.asq_last_status)); 3738 sbuf_delete(buf); 3739 return error; 3740 } 3741 if (next) 3742 device_printf(dev, "%s: TODO: get more config with SEID %d\n", 3743 __func__, next); 3744 3745 nmbuf = sbuf_new_auto(); 3746 if (!nmbuf) { 3747 device_printf(dev, "Could not allocate sbuf for name output.\n"); 3748 sbuf_delete(buf); 3749 return (ENOMEM); 3750 } 3751 3752 /* Sort entries by SEID for display */ 3753 qsort(sw_config->element, sw_config->header.num_reported, 3754 sizeof(struct i40e_aqc_switch_config_element_resp), 3755 &ixl_sw_cfg_elem_seid_cmp); 3756 3757 sbuf_cat(buf, "\n"); 3758 /* Assuming <= 255 elements in switch */ 3759 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); 3760 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); 3761 /* Exclude: 3762 * Revision -- all elements are revision 1 for now 3763 */ 3764 
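	/*
	 * Each row below shows an element's SEID and inferred name, its
	 * uplink and downlink SEIDs (again with names), and its
	 * connection type; nmbuf is a scratch sbuf that
	 * ixl_switch_element_string() clears and rewrites on each call.
	 */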
static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;
	enum i40e_status_code status;
	int error = 0;
	u16 next = 0;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	struct i40e_aqc_switch_config_element_resp *elem;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	status = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (status) {
		device_printf(dev,
		    "%s: aq_get_switch_config() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		/* Report the failure instead of the stale zero in 'error' */
		return (EIO);
	}
	if (next)
		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
		    __func__, next);

	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}

	/* Sort entries by SEID for display */
	qsort(sw_config->element, sw_config->header.num_reported,
	    sizeof(struct i40e_aqc_switch_config_element_resp),
	    &ixl_sw_cfg_elem_seid_cmp);

	sbuf_cat(buf, "\n");
	/* Assuming <= 255 elements in switch */
	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
	/*
	 * Exclude:
	 * Revision -- all elements are revision 1 for now
	 */
	sbuf_printf(buf,
	    "SEID (  Name  ) |   Up (  Name  ) | Down (  Name  ) | Conn Type\n"
	    "                |                 |                 | (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		elem = &sw_config->element[i];

		sbuf_printf(buf, "%4d", elem->seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    elem->element_type, elem->seid));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%4d", elem->uplink_seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    0, elem->uplink_seid));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%4d", elem->downlink_seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    0, elem->downlink_seid));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%8d", elem->connection_type);
		if (i < sw_config->header.num_reported - 1)
			sbuf_cat(buf, "\n");
	}
	sbuf_delete(nmbuf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (error);
}

static int
ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_vlan = -1;
	enum i40e_status_code status = 0;
	int error = 0;

	error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
		device_printf(dev, "Flags disallow setting of vlans\n");
		return (ENODEV);
	}

	/* switch_tag is a u16; reject values that would be silently truncated */
	if (requested_vlan < 0 || requested_vlan > 0xFFFF) {
		device_printf(dev, "Valid tags are 0-65535\n");
		return (EINVAL);
	}

	hw->switch_tag = requested_vlan;
	device_printf(dev,
	    "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
	    hw->switch_tag, hw->first_tag, hw->second_tag);
	status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
	if (status) {
		device_printf(dev,
		    "%s: aq_set_switch_config() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		/* Map the AQ status code onto an errno rather than returning it raw */
		return (EIO);
	}
	return (0);
}

static int
ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;
	u32 reg;

	struct i40e_aqc_get_set_rss_key_data key_data;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(&key_data, sizeof(key_data));

	sbuf_cat(buf, "\n");
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
		if (status)
			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
		}
	}

	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
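/*
 * Dump 'length' bytes of 'buf' into 'sb' as rows of up to 16 hex bytes, each
 * row prefixed with its byte offset (shifted by 'label_offset').  When 'text'
 * is true an ASCII column is appended, with non-printable bytes rendered as
 * '.'; for example, a 16-byte row might look like:
 *
 *	   0 | 48 65 6c 6c 6f 2c 20 77 6f 72 6c 64 21 00 00 00 Hello, world!...
 */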
static void
ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
{
	int i, j, k, width;
	char c;

	if (length < 1 || buf == NULL)
		return;

	int byte_stride = 16;
	int lines = length / byte_stride;
	int rem = length % byte_stride;
	if (rem > 0)
		lines++;

	for (i = 0; i < lines; i++) {
		width = (rem > 0 && i == lines - 1)
		    ? rem : byte_stride;

		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);

		for (j = 0; j < width; j++)
			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);

		if (width < byte_stride) {
			/* Pad a short final row so the ASCII column stays aligned */
			for (k = 0; k < (byte_stride - width); k++)
				sbuf_printf(sb, "   ");
		}

		if (!text) {
			sbuf_printf(sb, "\n");
			continue;
		}

		for (j = 0; j < width; j++) {
			c = (char)buf[i * byte_stride + j];
			if (c < 32 || c > 126)
				sbuf_printf(sb, ".");
			else
				sbuf_printf(sb, "%c", c);

			if (j == width - 1)
				sbuf_printf(sb, "\n");
		}
	}
}

static int
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;
	u8 hlut[512];
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(hlut, sizeof(hlut));
	sbuf_cat(buf, "\n");
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
		if (status)
			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
			reg = rd32(hw, I40E_PFQF_HLUT(i));
			bcopy(&reg, &hlut[i << 2], 4);
		}
	}
	ixl_sbuf_print_bytes(buf, hlut, sizeof(hlut), 0, false);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}

static int
ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	u64 hena;

	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);

	return sysctl_handle_long(oidp, NULL, hena, req);
}
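/*
 * Worked example of the 64-bit composition above: if I40E_PFQF_HENA(0) reads
 * 0x00000000 and I40E_PFQF_HENA(1) reads 0x00003F00, then
 *
 *	hena = 0x00000000 | ((u64)0x00003F00 << 32) = 0x00003F0000000000
 *
 * where each set bit enables RSS hashing for one packet/flow type.
 */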
/*
 * Sysctl to disable firmware's link management
 *
 * 1 - Disable link management on this port
 * 0 - Re-enable link management
 *
 * On normal NVMs, firmware manages link by default.
 */
static int
ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_mode = -1;
	enum i40e_status_code status = 0;
	int error = 0;

	/* Read in new mode */
	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Check for sane value */
	if (requested_mode < 0 || requested_mode > 1) {
		device_printf(dev, "Valid modes are 0 or 1\n");
		return (EINVAL);
	}

	/* Set new mode */
	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
	if (status) {
		device_printf(dev,
		    "%s: Error setting new phy debug mode %s,"
		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}

	return (0);
}

/*
 * Read some diagnostic data from a (Q)SFP+ module
 *
 *              SFP A2    QSFP Lower Page
 * Temperature   96-97          22-23
 * Vcc           98-99          26-27
 * TX power    102-103    34-35..40-41
 * RX power    104-105    50-51..56-57
 */
static int
ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	struct sbuf *sbuf;
	int error = 0;
	u8 output;

	if (req->oldptr == NULL) {
		error = SYSCTL_OUT(req, 0, 128);
		/* Propagate the sizing error instead of discarding it */
		return (error);
	}

	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
	if (error) {
		device_printf(dev, "Error reading from i2c\n");
		return (error);
	}

	/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
	if (output == 0x3) {
		/*
		 * Check for:
		 * - Internally calibrated data
		 * - Diagnostic monitoring is implemented
		 */
		pf->read_i2c_byte(pf, 92, 0xA0, &output);
		if (!(output & 0x60)) {
			device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
			return (0);
		}

		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (!sbuf) {
			device_printf(dev, "Could not allocate sbuf for output.\n");
			return (ENOMEM);
		}

		for (u8 offset = 96; offset < 100; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA2, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
		for (u8 offset = 102; offset < 106; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA2, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
	} else if (output == 0xD || output == 0x11) {
		/*
		 * QSFP+ modules are always internally calibrated, and must indicate
		 * what types of diagnostic monitoring are implemented
		 */
		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (!sbuf) {
			device_printf(dev, "Could not allocate sbuf for output.\n");
			return (ENOMEM);
		}

		for (u8 offset = 22; offset < 24; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA0, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
		for (u8 offset = 26; offset < 28; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA0, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
		/* Read the data from the first lane */
		for (u8 offset = 34; offset < 36; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA0, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
		for (u8 offset = 50; offset < 52; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA0, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
	} else {
		device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
		return (0);
	}

	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}
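/*
 * A sketch (not used by the driver) of how the raw bytes dumped above can be
 * converted to physical units per SFF-8472/SFF-8636, assuming an internally
 * calibrated module and that msb/lsb hold the two bytes of one field read in
 * order:
 *
 *	int temp_mdegC = (int16_t)((msb << 8) | lsb) * 1000 / 256;  // 1/256 degC units
 *	int vcc_100uV  = (uint16_t)((msb << 8) | lsb);              // 100 uV units
 */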
/*
 * Sysctl to read a byte from the I2C bus.
 *
 * Input: 32-bit value:
 *	bits 0-7:   device address (0xA0 or 0xA2)
 *	bits 8-15:  offset (0-255)
 *	bits 16-31: unused
 * Output: 8-bit value read
 */
static int
ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int input = -1, error = 0;
	u8 dev_addr, offset, output;

	/* Read in I2C read parameters */
	error = sysctl_handle_int(oidp, &input, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Validate device address */
	dev_addr = input & 0xFF;
	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
		return (EINVAL);
	}
	offset = (input >> 8) & 0xFF;

	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
	if (error)
		return (error);

	device_printf(dev, "%02X\n", output);
	return (0);
}

/*
 * Sysctl to write a byte to the I2C bus.
 *
 * Input: 32-bit value:
 *	bits 0-7:   device address (0xA0 or 0xA2)
 *	bits 8-15:  offset (0-255)
 *	bits 16-23: value to write
 *	bits 24-31: unused
 * Output: 8-bit value written
 */
static int
ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int input = -1, error = 0;
	u8 dev_addr, offset, value;

	/* Read in I2C write parameters */
	error = sysctl_handle_int(oidp, &input, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Validate device address */
	dev_addr = input & 0xFF;
	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
		return (EINVAL);
	}
	offset = (input >> 8) & 0xFF;
	value = (input >> 16) & 0xFF;

	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
	if (error)
		return (error);

	device_printf(dev, "%02X written\n", value);
	return (0);
}
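/*
 * Worked examples of the encodings above: to read byte 96 (0x60) from the
 * diagnostics page at address 0xA2, write
 *
 *	(0x60 << 8) | 0xA2 = 0x60A2
 *
 * to the read sysctl; to write the value 0x55 to offset 0x7B at address
 * 0xA0, write
 *
 *	(0x55 << 16) | (0x7B << 8) | 0xA0 = 0x557BA0
 *
 * to the write sysctl.
 */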
static int
ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int *is_set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return (EIO);

	status = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, abilities, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}

	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
	return (0);
}

static int
ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code status;

	/* Set new PHY config */
	memset(&config, 0, sizeof(config));
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
	if (set)
		config.fec_config |= bit_pos;
	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		config.phy_type = abilities->phy_type;
		config.phy_type_ext = abilities->phy_type_ext;
		config.link_speed = abilities->link_speed;
		config.eee_capability = abilities->eee_capability;
		config.eeer = abilities->eeer_val;
		config.low_power_ctrl = abilities->d3_lpan;
		status = i40e_aq_set_phy_config(hw, &config, NULL);

		if (status) {
			device_printf(dev,
			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
			    __func__, i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			return (EIO);
		}
	}

	return (0);
}

static int
ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
}

static int
ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
}

static int
ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
}

static int
ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
}

static int
ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
}
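/*
 * The five FEC sysctl handlers above share one read-modify-write pattern:
 * ixl_get_fec_config() reports whether a given bit is set in the current FEC
 * configuration byte, and ixl_set_fec_config() flips just that bit and pushes
 * the result back to the PHY.  A sketch of toggling one knob from driver code
 * (bit names as used above):
 *
 *	struct i40e_aq_get_phy_abilities_resp abilities;
 *	int is_set;
 *
 *	if (ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &is_set) == 0 &&
 *	    !is_set)
 *		(void)ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, 1);
 */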
static int
ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	u8 *final_buff;
	/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
	if (final_buff == NULL) {
		device_printf(dev, "Could not allocate memory for output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}
	int final_buff_len = 0;

	u8 cluster_id = 1;
	bool more = true;

	u8 dump_buf[4096];
	u16 curr_buff_size = 4096;
	u8 curr_next_table = 0;
	u32 curr_next_index = 0;

	u16 ret_buff_size;
	u8 ret_next_table;
	u32 ret_next_index;

	sbuf_cat(buf, "\n");

	while (more) {
		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
		if (status) {
			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			goto free_out;
		}

		/* Guard against overrunning the accumulation buffer */
		if (final_buff_len + ret_buff_size > IXL_FINAL_BUFF_SIZE) {
			device_printf(dev, "Debug dump exceeds %d bytes; aborting\n",
			    IXL_FINAL_BUFF_SIZE);
			goto free_out;
		}

		/* copy info out of temp buffer */
		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
		final_buff_len += ret_buff_size;

		if (ret_next_table != curr_next_table) {
			/* We're done with the current table; we can dump out read data. */
			sbuf_printf(buf, "%d:", curr_next_table);
			int bytes_printed = 0;
			/* '<' (not '<=') so we don't print one row past the read data */
			while (bytes_printed < final_buff_len) {
				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
				bytes_printed += 16;
			}
			sbuf_cat(buf, "\n");

			/* The entire cluster has been read; we're finished */
			if (ret_next_table == 0xFF)
				break;

			/* Otherwise clear the output buffer and continue reading */
			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
			final_buff_len = 0;
		}

		if (ret_next_index == 0xFFFFFFFF)
			ret_next_index = 0;

		bzero(dump_buf, sizeof(dump_buf));
		curr_next_table = ret_next_table;
		curr_next_index = ret_next_index;
	}

free_out:
	free(final_buff, M_IXL);
	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
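/*
 * Note on the dump loop above: i40e_aq_debug_dump() pages through one cluster
 * a single AQ buffer at a time.  Firmware echoes back the next table and
 * index to request; a change in ret_next_table means the current table is
 * complete (so its accumulated bytes are flushed to the sbuf), and a returned
 * table id of 0xFF marks the end of the cluster.
 */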
Set the \"LLDP Agent\" UEFI HII " 4440 "attribute to \"Enabled\" to use this sysctl\n"); 4441 return (EINVAL); 4442 default: 4443 device_printf(pf->dev, 4444 "Starting FW LLDP agent failed: error: %s, %s\n", 4445 i40e_stat_str(hw, status), 4446 i40e_aq_str(hw, hw->aq.asq_last_status)); 4447 return (EINVAL); 4448 } 4449 } 4450 4451 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4452 return (0); 4453 } 4454 4455 static int 4456 ixl_stop_fw_lldp(struct ixl_pf *pf) 4457 { 4458 struct i40e_hw *hw = &pf->hw; 4459 device_t dev = pf->dev; 4460 enum i40e_status_code status; 4461 4462 if (hw->func_caps.npar_enable != 0) { 4463 device_printf(dev, 4464 "Disabling FW LLDP agent is not supported on this device\n"); 4465 return (EINVAL); 4466 } 4467 4468 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 4469 device_printf(dev, 4470 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); 4471 return (EINVAL); 4472 } 4473 4474 status = i40e_aq_stop_lldp(hw, true, false, NULL); 4475 if (status != I40E_SUCCESS) { 4476 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 4477 device_printf(dev, 4478 "Disabling FW LLDP agent failed: error: %s, %s\n", 4479 i40e_stat_str(hw, status), 4480 i40e_aq_str(hw, hw->aq.asq_last_status)); 4481 return (EINVAL); 4482 } 4483 4484 device_printf(dev, "FW LLDP agent is already stopped\n"); 4485 } 4486 4487 i40e_aq_set_dcb_parameters(hw, true, NULL); 4488 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4489 return (0); 4490 } 4491 4492 static int 4493 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 4494 { 4495 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4496 int state, new_state, error = 0; 4497 4498 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); 4499 4500 /* Read in new mode */ 4501 error = sysctl_handle_int(oidp, &new_state, 0, req); 4502 if ((error) || (req->newptr == NULL)) 4503 return (error); 4504 4505 /* Already in requested state */ 4506 if (new_state == state) 4507 return (error); 4508 4509 if (new_state == 0) 4510 return ixl_stop_fw_lldp(pf); 4511 4512 return ixl_start_fw_lldp(pf); 4513 } 4514 4515 static int 4516 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4517 { 4518 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4519 int state, new_state; 4520 int sysctl_handle_status = 0; 4521 enum i40e_status_code cmd_status; 4522 4523 /* Init states' values */ 4524 state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED)); 4525 4526 /* Get requested mode */ 4527 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); 4528 if ((sysctl_handle_status) || (req->newptr == NULL)) 4529 return (sysctl_handle_status); 4530 4531 /* Check if state has changed */ 4532 if (new_state == state) 4533 return (0); 4534 4535 /* Set new state */ 4536 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); 4537 4538 /* Save new state or report error */ 4539 if (!cmd_status) { 4540 if (new_state == 0) 4541 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4542 else 4543 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4544 } else if (cmd_status == I40E_ERR_CONFIG) 4545 return (EPERM); 4546 else 4547 return (EIO); 4548 4549 return (0); 4550 } 4551 4552 static int 4553 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS) 4554 { 4555 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4556 int error, state; 4557 4558 state = !!(atomic_load_acq_32(&pf->state) & 4559 IXL_PF_STATE_LINK_ACTIVE_ON_DOWN); 4560 4561 error = sysctl_handle_int(oidp, &state, 0, req); 4562 if ((error) || (req->newptr == NULL)) 
static int
ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int error, state;

	state = !!(atomic_load_acq_32(&pf->state) &
	    IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);

	error = sysctl_handle_int(oidp, &state, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (state == 0)
		atomic_clear_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
	else
		atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);

	return (0);
}

int
ixl_attach_get_link_status(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int error = 0;

	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
			return (error);
		}
	}

	/* Determine link state */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* If the user did not set a flow control mode, use the current FW setting */
	if (pf->fc == -1)
		pf->fc = hw->fc.current_mode;

	return (0);
}

static int
ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int requested = 0, error = 0;

	/* Read in new mode */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Initiate the PF reset later in the admin task */
	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);

	return (error);
}

static int
ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int requested = 0, error = 0;

	/* Read in new mode */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);

	return (error);
}

static int
ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int requested = 0, error = 0;

	/* Read in new mode */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);

	return (error);
}

/*
 * Print out the mapping of Tx and Rx queue indexes
 * to MSI-X vectors.
 */
static int
ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	struct ixl_rx_queue *rx_que;
	struct ixl_tx_queue *tx_que;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		rx_que = &vsi->rx_queues[i];
		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
	}
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		tx_que = &vsi->tx_queues[i];
		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}