/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static const char *	ixl_link_speed_string(enum i40e_aq_link_speed);
static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
static enum ixl_fw_mode	ixl_get_fw_mode(struct ixl_pf *);

/* Sysctls */
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

/* Functions for setting and checking driver state. Note the functions take
 * bit positions, not bitmasks. The atomic_set_32 and atomic_clear_32
 * operations require bitmasks. This can easily lead to programming errors,
 * so we provide wrapper functions to avoid them.
 */

/**
 * ixl_set_state - Set the specified state
 * @s: the state bitmap
 * @bit: the state to set
 *
 * Atomically update the state bitmap with the specified bit set.
 */
inline void
ixl_set_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_set_32 expects a bitmask */
	atomic_set_32(s, BIT(bit));
}

/**
 * ixl_clear_state - Clear the specified state
 * @s: the state bitmap
 * @bit: the state to clear
 *
 * Atomically update the state bitmap with the specified bit cleared.
 */
inline void
ixl_clear_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_clear_32 expects a bitmask */
	atomic_clear_32(s, BIT(bit));
}

/**
 * ixl_test_state - Test the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Return true if the state is set, false otherwise. Use this only if the flow
 * does not need to update the state. If you must update the state as well,
 * prefer ixl_testandset_state.
 */
inline bool
ixl_test_state(volatile u32 *s, enum ixl_state bit)
{
	return !!(*s & BIT(bit));
}
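
/*
 * Usage sketch (illustrative only, not driver code): all of the wrappers
 * take a bit position from enum ixl_state, e.g. IXL_STATE_RECOVERY_MODE as
 * used in ixl_pf_reset() below.
 *
 *	ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
 *	if (ixl_test_state(&pf->state, IXL_STATE_RECOVERY_MODE))
 *		;	// read-only check, no update intended
 *	if (ixl_testandset_state(&pf->state, IXL_STATE_RECOVERY_MODE) == 0)
 *		;	// bit was clear; this caller set it atomically
 *	ixl_clear_state(&pf->state, IXL_STATE_RECOVERY_MODE);
 */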

/**
 * ixl_testandset_state - Test and set the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Atomically update the state bitmap, setting the specified bit. Returns the
 * previous value of the bit.
 */
inline u32
ixl_testandset_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_testandset_32 expects a bit position, as opposed to the
	   bitmask expected by the other atomic functions */
	return atomic_testandset_32(s, bit);
}

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify the state of the FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else {
		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
			fw_mode = IXL_FW_MODE_UEMPR;
	}
	return (fw_mode);
}

/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success, or an error code on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/* Check FW mode again in case it has changed while
	 * waiting for reset to complete */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}

/**
 * ixl_setup_hmc - Setup LAN Host Memory Cache
 * @pf: PF structure
 *
 * Init and configure LAN Host Memory Cache
 *
 * @returns 0 on success, EIO on error
 */
int
ixl_setup_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	return (0);
}

/**
 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
 * @pf: PF structure
 *
 * Shutdown Host Memory Cache if configured.
 *
 */
void
ixl_shutdown_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/* HMC not configured, no need to shutdown */
	if (hw->hmc.hmc_obj == NULL)
		return;

	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(pf->dev,
		    "Shutdown LAN HMC failed with code %s\n",
		    i40e_stat_str(hw, status));
}
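
/*
 * Note on pairing (a sketch of the expected call order, not driver code):
 * ixl_setup_hmc() wraps i40e_init_lan_hmc() + i40e_configure_lan_hmc(), and
 * every successful setup should eventually be balanced by ixl_shutdown_hmc().
 * The NULL check on hw->hmc.hmc_obj above makes the shutdown side safe to
 * call even if setup never ran or failed early:
 *
 *	if (ixl_pf_reset(pf) == 0 && ixl_setup_hmc(pf) == 0) {
 *		... use the LAN queues ...
 *		ixl_shutdown_hmc(pf);	// harmless if HMC is absent
 *	}
 */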

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		hw->func_caps.iwarp = 0;
		return (0);
	}

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_IXL);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case IXL_I2C_ACCESS_METHOD_AQ:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Keep link active by default */
	ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}
"MDIO dedicated" : 465 "MDIO shared"); 466 467 return (0); 468 } 469 470 /* For the set_advertise sysctl */ 471 void 472 ixl_set_initial_advertised_speeds(struct ixl_pf *pf) 473 { 474 device_t dev = pf->dev; 475 int err; 476 477 /* Make sure to initialize the device to the complete list of 478 * supported speeds on driver load, to ensure unloading and 479 * reloading the driver will restore this value. 480 */ 481 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); 482 if (err) { 483 /* Non-fatal error */ 484 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", 485 __func__, err); 486 return; 487 } 488 489 pf->advertised_speed = 490 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 491 } 492 493 int 494 ixl_teardown_hw_structs(struct ixl_pf *pf) 495 { 496 enum i40e_status_code status = 0; 497 struct i40e_hw *hw = &pf->hw; 498 device_t dev = pf->dev; 499 500 /* Shutdown LAN HMC */ 501 if (hw->hmc.hmc_obj) { 502 status = i40e_shutdown_lan_hmc(hw); 503 if (status) { 504 device_printf(dev, 505 "init: LAN HMC shutdown failure; status %s\n", 506 i40e_stat_str(hw, status)); 507 goto err_out; 508 } 509 } 510 511 /* Shutdown admin queue */ 512 ixl_disable_intr0(hw); 513 status = i40e_shutdown_adminq(hw); 514 if (status) 515 device_printf(dev, 516 "init: Admin Queue shutdown failure; status %s\n", 517 i40e_stat_str(hw, status)); 518 519 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag); 520 err_out: 521 return (status); 522 } 523 524 /* 525 ** Creates new filter with given MAC address and VLAN ID 526 */ 527 static struct ixl_mac_filter * 528 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan) 529 { 530 struct ixl_mac_filter *f; 531 532 /* create a new empty filter */ 533 f = malloc(sizeof(struct ixl_mac_filter), 534 M_IXL, M_NOWAIT | M_ZERO); 535 if (f) { 536 LIST_INSERT_HEAD(headp, f, ftle); 537 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); 538 f->vlan = vlan; 539 } 540 541 return (f); 542 } 543 544 /** 545 * ixl_free_filters - Free all filters in given list 546 * headp - pointer to list head 547 * 548 * Frees memory used by each entry in the list. 549 * Does not remove filters from HW. 550 */ 551 void 552 ixl_free_filters(struct ixl_ftl_head *headp) 553 { 554 struct ixl_mac_filter *f, *nf; 555 556 f = LIST_FIRST(headp); 557 while (f != NULL) { 558 nf = LIST_NEXT(f, ftle); 559 free(f, M_IXL); 560 f = nf; 561 } 562 563 LIST_INIT(headp); 564 } 565 566 static u_int 567 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 568 { 569 struct ixl_add_maddr_arg *ama = arg; 570 struct ixl_vsi *vsi = ama->vsi; 571 const u8 *macaddr = (u8*)LLADDR(sdl); 572 struct ixl_mac_filter *f; 573 574 /* Does one already exist */ 575 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY); 576 if (f != NULL) 577 return (0); 578 579 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY); 580 if (f == NULL) { 581 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 582 return (0); 583 } 584 f->flags |= IXL_FILTER_MC; 585 586 return (1); 587 } 588 589 /********************************************************************* 590 * Filter Routines 591 * 592 * Routines for multicast and vlan filter management. 
593 * 594 *********************************************************************/ 595 void 596 ixl_add_multi(struct ixl_vsi *vsi) 597 { 598 if_t ifp = vsi->ifp; 599 struct i40e_hw *hw = vsi->hw; 600 int mcnt = 0; 601 struct ixl_add_maddr_arg cb_arg; 602 603 IOCTL_DEBUGOUT("ixl_add_multi: begin"); 604 605 mcnt = if_llmaddr_count(ifp); 606 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { 607 i40e_aq_set_vsi_multicast_promiscuous(hw, 608 vsi->seid, TRUE, NULL); 609 /* delete all existing MC filters */ 610 ixl_del_multi(vsi, true); 611 return; 612 } 613 614 cb_arg.vsi = vsi; 615 LIST_INIT(&cb_arg.to_add); 616 617 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg); 618 if (mcnt > 0) 619 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt); 620 621 IOCTL_DEBUGOUT("ixl_add_multi: end"); 622 } 623 624 static u_int 625 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 626 { 627 struct ixl_mac_filter *f = arg; 628 629 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl))) 630 return (1); 631 else 632 return (0); 633 } 634 635 void 636 ixl_del_multi(struct ixl_vsi *vsi, bool all) 637 { 638 struct ixl_ftl_head to_del; 639 if_t ifp = vsi->ifp; 640 struct ixl_mac_filter *f, *fn; 641 int mcnt = 0; 642 643 IOCTL_DEBUGOUT("ixl_del_multi: begin"); 644 645 LIST_INIT(&to_del); 646 /* Search for removed multicast addresses */ 647 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) { 648 if ((f->flags & IXL_FILTER_MC) == 0 || 649 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0))) 650 continue; 651 652 LIST_REMOVE(f, ftle); 653 LIST_INSERT_HEAD(&to_del, f, ftle); 654 mcnt++; 655 } 656 657 if (mcnt > 0) 658 ixl_del_hw_filters(vsi, &to_del, mcnt); 659 } 660 661 void 662 ixl_link_up_msg(struct ixl_pf *pf) 663 { 664 struct i40e_hw *hw = &pf->hw; 665 if_t ifp = pf->vsi.ifp; 666 char *req_fec_string, *neg_fec_string; 667 u8 fec_abilities; 668 669 fec_abilities = hw->phy.link_info.req_fec_info; 670 /* If both RS and KR are requested, only show RS */ 671 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) 672 req_fec_string = ixl_fec_string[0]; 673 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) 674 req_fec_string = ixl_fec_string[1]; 675 else 676 req_fec_string = ixl_fec_string[2]; 677 678 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) 679 neg_fec_string = ixl_fec_string[0]; 680 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) 681 neg_fec_string = ixl_fec_string[1]; 682 else 683 neg_fec_string = ixl_fec_string[2]; 684 685 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 686 if_name(ifp), 687 ixl_link_speed_string(hw->phy.link_info.link_speed), 688 req_fec_string, neg_fec_string, 689 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", 690 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && 691 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 692 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? 693 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 694 ixl_fc_string[1] : ixl_fc_string[0]); 695 } 696 697 /* 698 * Configure admin queue/misc interrupt cause registers in hardware. 

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

void
ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}

void
ixl_vsi_add_sysctls(struct ixl_vsi *vsi, const char *sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	/* Copy of netstat RX errors counter for validation purposes */
	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
	    CTLFLAG_RD, &vsi->ierrors,
	    "RX packet errors");

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}
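
/*
 * Handler behavior sketch: reads return the current value; writes validate
 * the input and then program the hardware immediately. Assuming the handlers
 * are attached under the device tree as tx_itr/rx_itr (attachment happens
 * elsewhere in this file), usage from userland would look like:
 *
 *	# sysctl dev.ixl.0.tx_itr		(read current Tx ITR)
 *	# sysctl dev.ixl.0.rx_itr=62		(set Rx ITR; rejected when
 *						 dynamic RX itr is enabled
 *						 or the value is out of range)
 */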

void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t	dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}
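
/*
 * Note on the magic 52 above: struct i40e_aqc_get_set_rss_key_data carries
 * a 40-byte standard RSS key plus a 12-byte extended key, 52 bytes total,
 * so the bcopy() fills both from the seed buffer. On non-X722 parts the
 * same seed is instead written one 32-bit word at a time into the
 * I40E_PFQF_HKEY registers.
 */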

/*
 * Configure enabled PCTYPES for RSS.
 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64		set_hena = 0, hena;

#ifdef RSS
	u32		rss_hash_config;

	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}

/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}

/*
 * In some firmware versions there is a default MAC/VLAN filter
 * configured which interferes with the filters managed by the driver.
 * Make sure it's removed.
 */
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
	struct i40e_aqc_remove_macvlan_element_data e;

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
	    I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	LIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	if (IXL_VSI_IS_VF(vsi))
		return;

	ixl_del_default_hw_filters(vsi);

	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}

void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_ftl_head tmp;
	int cnt;

	/*
	 * The ixl_add_hw_filters function adds filters configured
	 * in HW to a list in VSI. Move all filters to a temporary
	 * list to avoid corrupting it by concatenating to itself.
	 */
	LIST_INIT(&tmp);
	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
	cnt = vsi->num_hw_filters;
	vsi->num_hw_filters = 0;

	ixl_add_hw_filters(vsi, &tmp, cnt);

	/*
	 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
	 * will be NULL. Furthermore, the ftl of such a vsi already contains
	 * the IXL_VLAN_ANY filter so we can skip that as well.
	 */
	if (hw == NULL)
		return;

	/* Filter could be removed if MAC address was changed */
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * VLAN HW filtering is enabled, make sure that filters
	 * for all registered VLAN tags are configured
	 */
	ixl_add_vlan_filters(vsi, hw->mac.addr);
}
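
/*
 * VLAN/MAC filter life cycle sketch (illustrative, mirrors the logic in
 * ixl_add_filter()/ixl_del_filter() below): while no VLAN is registered,
 * the VSI carries a single (MAC, IXL_VLAN_ANY) filter; registering the
 * first VLAN replaces it with (MAC, 0) for untagged frames plus
 * (MAC, vlan) for the tag, and removing the last VLAN reverses the swap:
 *
 *	(MAC, IXL_VLAN_ANY)  --first vlan-->  (MAC, 0) + (MAC, vlan)
 *	(MAC, 0) + (MAC, vlan)  --last vlan gone-->  (MAC, IXL_VLAN_ANY)
 */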

/*
 * This routine adds a MAC/VLAN filter to the software filter
 * list, then adds that new filter to the HW if it doesn't already
 * exist in the SW filter list.
 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f, *tmp;
	struct ixl_pf		*pf;
	device_t		dev;
	struct ixl_ftl_head	to_add;
	int			to_add_cnt;

	pf = vsi->back;
	dev = pf->dev;
	to_add_cnt = 1;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist? */
	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f != NULL)
		return;

	LIST_INIT(&to_add);
	f = ixl_new_filter(&to_add, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/*
	** If this is the first vlan being registered, we need to
	** remove the ANY filter that indicates we are not in a vlan,
	** and replace it with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			struct ixl_ftl_head to_del;

			/* Prepare new filter first to avoid removing
			 * the VLAN_ANY filter if allocation fails */
			f = ixl_new_filter(&to_add, macaddr, 0);
			if (f == NULL) {
				device_printf(dev, "WARNING: no filter available!!\n");
				free(LIST_FIRST(&to_add), M_IXL);
				return;
			}
			to_add_cnt++;

			LIST_REMOVE(tmp, ftle);
			LIST_INIT(&to_del);
			LIST_INSERT_HEAD(&to_del, tmp, ftle);
			ixl_del_hw_filters(vsi, &to_del, 1);
		}
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

/**
 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
 * @vsi: pointer to VSI
 * @macaddr: MAC address
 *
 * Adds a MAC/VLAN filter for each VLAN configured on the interface
 * if there are enough HW filters. Otherwise adds a single filter
 * for all tagged and untagged frames to allow all configured VLANs
 * to receive traffic.
 */
void
ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_ftl_head to_add;
	struct ixl_mac_filter *f;
	int to_add_cnt = 0;
	int i, vlan = 0;

	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
		return;
	}
	LIST_INIT(&to_add);

	/* Add filter for untagged frames if it does not exist yet */
	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (f == NULL) {
		f = ixl_new_filter(&to_add, macaddr, 0);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			return;
		}
		to_add_cnt++;
	}

	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
		if (vlan == -1)
			break;

		/* Does one already exist? */
		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
		if (f != NULL)
			continue;

		f = ixl_new_filter(&to_add, macaddr, vlan);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			ixl_free_filters(&to_add);
			return;
		}
		to_add_cnt++;
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head ftl_head;
	int to_del_cnt = 1;

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f == NULL)
		return;

	LIST_REMOVE(f, ftle);
	LIST_INIT(&ftl_head);
	LIST_INSERT_HEAD(&ftl_head, f, ftle);
	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
		vsi->num_macs--;

	/* If this is not the last vlan just remove the filter */
	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
		return;
	}

	/* It's the last vlan, we need to switch back to a non-vlan filter */
	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (tmp != NULL) {
		LIST_REMOVE(tmp, ftle);
		LIST_INSERT_AFTER(f, tmp, ftle);
		to_del_cnt++;
	}
	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);

	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
}

/**
 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
 * @vsi: VSI which filters need to be removed
 * @macaddr: MAC address
 *
 * Remove all MAC/VLAN filters with a given MAC address. For multicast
 * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY)
 * so skip them to speed up processing. Those filters should be removed
 * using the ixl_del_filter function.
 */
void
ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head to_del;
	int to_del_cnt = 0;

	LIST_INIT(&to_del);

	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
		if ((f->flags & IXL_FILTER_MC) != 0 ||
		    !ixl_ether_is_equal(f->macaddr, macaddr))
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
	if (to_del_cnt > 0)
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
}

/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f;

	LIST_FOREACH(f, headp, ftle) {
		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
		    (f->vlan == vlan)) {
			return (f);
		}
	}

	return (NULL);
}

/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter	*f, *fn;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	enum i40e_status_code	status;
	int			j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	LIST_FOREACH(f, to_add, ftle) {
		b = &a[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
		if (f->vlan == IXL_VLAN_ANY) {
			b->vlan_tag = 0;
			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
		} else {
			b->vlan_tag = f->vlan;
			b->flags = 0;
		}
		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
		/* Some FW versions do not set match method
		 * when adding filters fails. Initialize it with
		 * the expected error value to allow detection of which
		 * filters were not added */
		b->match_method = I40E_AQC_MM_ERR_NO_RES;
		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		if (++j == cnt)
			break;
	}
	if (j != cnt) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: list of filters too short; expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_add);
		goto out_free;
	}

	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
	if (status == I40E_SUCCESS) {
		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
		vsi->num_hw_filters += j;
		goto out_free;
	}

	device_printf(dev,
	    "i40e_aq_add_macvlan status %s, error %s\n",
	    i40e_stat_str(hw, status),
	    i40e_aq_str(hw, hw->aq.asq_last_status));
	j = 0;

	/* Verify which filters were actually configured in HW
	 * and add them to the list */
	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
		LIST_REMOVE(f, ftle);
		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
			ixl_dbg_filter(pf,
			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
			    __func__,
			    MAC_FORMAT_ARGS(f->macaddr),
			    f->vlan);
			free(f, M_IXL);
		} else {
			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
			vsi->num_hw_filters++;
		}
		j++;
	}

out_free:
	free(a, M_IXL);
}

/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	struct ixl_mac_filter	*f, *f_temp;
	enum i40e_status_code	status;
	int			j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
		e = &d[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		if (f->vlan == IXL_VLAN_ANY) {
			e->vlan_tag = 0;
			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
		} else {
			e->vlan_tag = f->vlan;
		}

		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		/* delete entry from the list */
		LIST_REMOVE(f, ftle);
		free(f, M_IXL);
		if (++j == cnt)
			break;
	}
	if (j != cnt || !LIST_EMPTY(to_del)) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_del);
		goto out_free;
	}
	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		for (int i = 0; i < j; i++) {
			if (d[i].error_code == 0)
				continue;
			device_printf(dev,
			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
			    d[i].vlan_tag);
		}
	}

	vsi->num_hw_filters -= j;

out_free:
	free(d, M_IXL);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
}

int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
	    I40E_QRX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "RX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
	i40e_usec_delay(500);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "TX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "RX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}
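
/*
 * Background for the handlers below: MDD stands for Malicious Driver
 * Detection. The hardware latches the offending function/queue in the
 * GL_MDET_TX/GL_MDET_RX registers; the handlers decode those fields, clear
 * the latched event by writing the registers back, and then consult the
 * per-PF (PF_MDET_*) and per-VF (VP_MDET_*) registers to attribute the
 * event to this PF or to one of its VFs.
 */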
static void
ixl_handle_tx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 vf_num, queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	reg = rd32(hw, I40E_PF_MDET_TX);
	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
		    event, queue, pf_num, vf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "TX Malicious Driver Detection event (unknown)\n");
}

static void
ixl_handle_rx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/*
	 * GL_MDET_RX doesn't contain VF number information, unlike
	 * GL_MDET_TX.
	 */
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
		    I40E_GL_MDET_RX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
		    I40E_GL_MDET_RX_QUEUE_SHIFT;
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	reg = rd32(hw, I40E_PF_MDET_RX);
	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d, (VF-%d)\n",
		    event, queue, pf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "RX Malicious Driver Detection event (unknown)\n");
}
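/*
 * Editorial sketch: the admin interrupt path typically just latches that
 * an MDD event fired and defers the register walk above to task context,
 * which then calls ixl_handle_mdd_event().  The function name below is
 * hypothetical; the ICR0 mask and state bit are the ones this driver
 * uses elsewhere.
 */
#if 0
static void
ixl_example_note_mdd(struct ixl_pf *pf, u32 icr0)
{
	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
		ixl_set_state(&pf->state, IXL_STATE_MDD_PENDING);
	/* the admin task later checks the bit and runs the handler */
}
#endif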
/**
 * ixl_handle_mdd_event
 *
 * Called from the interrupt handler to identify possibly malicious VFs
 * (it also detects MDD events caused by the PF itself).
 **/
void
ixl_handle_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/*
	 * Handle both TX/RX because it's possible they could
	 * both trigger in the same interrupt.
	 */
	ixl_handle_tx_mdd_event(pf);
	ixl_handle_rx_mdd_event(pf);

	ixl_clear_state(&pf->state, IXL_STATE_MDD_PENDING);

	/* re-enable mdd interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	ixl_flush(hw);
}

void
ixl_enable_intr0(struct i40e_hw *hw)
{
	u32 reg;

	/* Use IXL_ITR_NONE so ITR isn't updated here */
	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
}

void
ixl_disable_intr0(struct i40e_hw *hw)
{
	u32 reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
	ixl_flush(hw);
}

void
ixl_enable_queue(struct i40e_hw *hw, int id)
{
	u32 reg;

	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

void
ixl_disable_queue(struct i40e_hw *hw, int id)
{
	u32 reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}
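/*
 * Editorial sketch: ixl_enable_queue()/ixl_disable_queue() bracket
 * per-queue MSI-X servicing; once a queue has been drained, its vector
 * is re-armed with the queue's MSI-X index biased back by one (queue
 * vectors start after the admin vector).  A minimal shape, assuming the
 * queue's msix field as kept by this driver (helper name hypothetical):
 */
#if 0
static void
ixl_example_rx_intr_enable(struct i40e_hw *hw, struct ixl_rx_queue *que)
{
	/* vector 0 is the admin queue, so queue N uses DYN_CTLN(N) */
	ixl_enable_queue(hw, que->msix - 1);
}
#endif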
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING);

	ixl_prepare_for_reset(pf, is_up);
	/*
	 * i40e_pf_reset checks the type of reset and acts accordingly:
	 * if an EMP or Core reset was already performed, a PF reset is
	 * not necessary and it sometimes fails.
	 */
	ixl_pf_reset(pf);

	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		device_printf(pf->dev,
		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		pf->link_up = FALSE;
		ixl_update_link_status(pf);
	}

	ixl_rebuild_hw_structs_after_reset(pf, is_up);

	ixl_clear_state(&pf->state, IXL_STATE_RESETTING);
}

void
ixl_update_stats_counters(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_vf *vf;
	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;

	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->crc_errors, &nsd->crc_errors);
	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->illegal_bytes, &nsd->illegal_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
	    I40E_GLPRT_GORCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
	    I40E_GLPRT_GOTCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_discards,
	    &nsd->eth.rx_discards);
	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
	    I40E_GLPRT_UPRCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_unicast,
	    &nsd->eth.rx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
	    I40E_GLPRT_UPTCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.tx_unicast,
	    &nsd->eth.tx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
	    I40E_GLPRT_MPRCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_multicast,
	    &nsd->eth.rx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
	    I40E_GLPRT_MPTCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.tx_multicast,
	    &nsd->eth.tx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
	    I40E_GLPRT_BPRCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.rx_broadcast,
	    &nsd->eth.rx_broadcast);
	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
	    I40E_GLPRT_BPTCL(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->eth.tx_broadcast,
	    &nsd->eth.tx_broadcast);

	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_dropped_link_down,
	    &nsd->tx_dropped_link_down);
	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->mac_local_faults,
	    &nsd->mac_local_faults);
	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->mac_remote_faults,
	    &nsd->mac_remote_faults);
	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_length_errors,
	    &nsd->rx_length_errors);

	/* Flow control (LFC) stats */
	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->link_xon_rx, &nsd->link_xon_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->link_xon_tx, &nsd->link_xon_tx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->link_xoff_rx, &nsd->link_xoff_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
		vsi->shared->isc_pause_frames = 1;

	/* Packet size stats rx */
	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
	    I40E_GLPRT_PRC64L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_64, &nsd->rx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
	    I40E_GLPRT_PRC127L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_127, &nsd->rx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
	    I40E_GLPRT_PRC255L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_255, &nsd->rx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
	    I40E_GLPRT_PRC511L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_511, &nsd->rx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
	    I40E_GLPRT_PRC1023L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_1023, &nsd->rx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
	    I40E_GLPRT_PRC1522L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_1522, &nsd->rx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
	    I40E_GLPRT_PRC9522L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_size_big, &nsd->rx_size_big);

	/* Packet size stats tx */
	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
	    I40E_GLPRT_PTC64L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_64, &nsd->tx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
	    I40E_GLPRT_PTC127L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_127, &nsd->tx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
	    I40E_GLPRT_PTC255L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_255, &nsd->tx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
	    I40E_GLPRT_PTC511L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_511, &nsd->tx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
	    I40E_GLPRT_PTC1023L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_1023, &nsd->tx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
	    I40E_GLPRT_PTC1522L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_1522, &nsd->tx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
	    I40E_GLPRT_PTC9522L(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->tx_size_big, &nsd->tx_size_big);

	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_undersize, &nsd->rx_undersize);
	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_fragments, &nsd->rx_fragments);
	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_oversize, &nsd->rx_oversize);
	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
	    pf->stat_offsets_loaded,
	    &osd->rx_jabber, &nsd->rx_jabber);
	/* EEE */
	i40e_get_phy_lpi_status(hw, nsd);

	i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
	    &osd->tx_lpi_count, &nsd->tx_lpi_count,
	    &osd->rx_lpi_count, &nsd->rx_lpi_count);

	pf->stat_offsets_loaded = true;
	/* End hw stats */

	/* Update vsi stats */
	ixl_update_vsi_stats(vsi);

	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &pf->vfs[i];
		if (vf->vf_flags & VF_FLAG_ENABLED)
			ixl_update_eth_stats(&pf->vfs[i].vsi);
	}
}

/**
 * Update VSI-specific ethernet statistics counters.
 **/
void
ixl_update_eth_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *es;
	struct i40e_eth_stats *oes;
	u16 stat_idx = vsi->info.stat_counter_idx;

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_errors, &es->tx_errors);
	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_discards, &es->rx_discards);

	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
	    I40E_GLV_GORCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_bytes, &es->rx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
	    I40E_GLV_UPRCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_unicast, &es->rx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
	    I40E_GLV_MPRCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_multicast, &es->rx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
	    I40E_GLV_BPRCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->rx_broadcast, &es->rx_broadcast);

	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
	    I40E_GLV_GOTCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_bytes, &es->tx_bytes);
	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
	    I40E_GLV_UPTCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_unicast, &es->tx_unicast);
	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
	    I40E_GLV_MPTCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_multicast, &es->tx_multicast);
	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
	    I40E_GLV_BPTCL(stat_idx),
	    vsi->stat_offsets_loaded,
	    &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
void
ixl_update_vsi_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf;
	struct i40e_eth_stats *es;
	u64 tx_discards, csum_errs;

	struct i40e_hw_port_stats *nsd;

	pf = vsi->back;
	es = &vsi->eth_stats;
	nsd = &pf->stats;

	ixl_update_eth_stats(vsi);

	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;

	csum_errs = 0;
	for (int i = 0; i < vsi->num_rx_queues; i++)
		csum_errs += vsi->rx_queues[i].rxr.csum_errs;
	nsd->checksum_error = csum_errs;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	    es->rx_multicast +
	    es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	    es->tx_multicast +
	    es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
	    nsd->checksum_error + nsd->rx_length_errors +
	    nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
	    nsd->rx_jabber);
	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);
}

/**
 * Reset all of the stats for the given pf
 **/
void
ixl_pf_reset_stats(struct ixl_pf *pf)
{
	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
	pf->stat_offsets_loaded = false;
}

/**
 * Resets all stats of the given vsi
 **/
void
ixl_vsi_reset_stats(struct ixl_vsi *vsi)
{
	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
	vsi->stat_offsets_loaded = false;
}

/**
 * Read and update a 48 bit stat from the hw
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/
void
ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
    bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	new_data = rd64(hw, loreg);

	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * Read and update a 32 bit stat from the hw
 **/
void
ixl_stat_update32(struct i40e_hw *hw, u32 reg,
    bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (new_data >= *offset)
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}
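/*
 * Editorial worked example for the wrap branch in ixl_stat_update48():
 * if the saved offset is 0xFFFFFFFFFFF0 and the next raw read is 0x10
 * (the 48-bit counter wrapped), the reported delta is
 * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since the
 * offset was captured, and the final mask keeps the result in 48 bits.
 */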
/**
 * Add subset of device sysctls safe to use in recovery mode
 */
void
ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	struct sysctl_oid *debug_node;
	struct sysctl_oid_list *debug_list;

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_show_fw, "A", "Firmware version");

	/* Add sysctls meant to print debug information, but don't list them
	 * in "sysctl -a" output. */
	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
	    "Debug Sysctls");
	debug_list = SYSCTL_CHILDREN(debug_node);

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Shared code debug message level");

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
	    &pf->dbg_mask, 0, "Non-shared code debug message level");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "dump_debug_data",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_pf_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_core_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_global_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "queue_interrupt_table",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
}
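/*
 * Editorial sketch: the OIDs registered above hang off the device's
 * sysctl tree, so userland reads them with sysctlbyname(3).  A minimal
 * standalone reader, assuming the adapter probed as unit 0 (the
 * "dev.ixl.0" prefix depends on the unit number):
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char fw[128];
	size_t len = sizeof(fw);

	if (sysctlbyname("dev.ixl.0.fw_version", fw, &len, NULL, 0) == 0)
		printf("firmware: %.*s\n", (int)len, fw);
	return (0);
}
#endif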
"rx_itr", 2495 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2496 ixl_sysctl_pf_rx_itr, "I", 2497 "Immediately set RX ITR value for all queues"); 2498 2499 SYSCTL_ADD_INT(ctx, ctx_list, 2500 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 2501 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 2502 2503 SYSCTL_ADD_INT(ctx, ctx_list, 2504 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 2505 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 2506 2507 /* Add FEC sysctls for 25G adapters */ 2508 if (i40e_is_25G_device(hw->device_id)) { 2509 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2510 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2511 "FEC Sysctls"); 2512 fec_list = SYSCTL_CHILDREN(fec_node); 2513 2514 SYSCTL_ADD_PROC(ctx, fec_list, 2515 OID_AUTO, "fc_ability", 2516 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2517 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 2518 2519 SYSCTL_ADD_PROC(ctx, fec_list, 2520 OID_AUTO, "rs_ability", 2521 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2522 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 2523 2524 SYSCTL_ADD_PROC(ctx, fec_list, 2525 OID_AUTO, "fc_requested", 2526 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2527 ixl_sysctl_fec_fc_request, "I", 2528 "FC FEC mode requested on link"); 2529 2530 SYSCTL_ADD_PROC(ctx, fec_list, 2531 OID_AUTO, "rs_requested", 2532 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2533 ixl_sysctl_fec_rs_request, "I", 2534 "RS FEC mode requested on link"); 2535 2536 SYSCTL_ADD_PROC(ctx, fec_list, 2537 OID_AUTO, "auto_fec_enabled", 2538 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2539 ixl_sysctl_fec_auto_enable, "I", 2540 "Let FW decide FEC ability/request modes"); 2541 } 2542 2543 SYSCTL_ADD_PROC(ctx, ctx_list, 2544 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2545 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 2546 2547 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2548 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2549 "Energy Efficient Ethernet (EEE) Sysctls"); 2550 eee_list = SYSCTL_CHILDREN(eee_node); 2551 2552 SYSCTL_ADD_PROC(ctx, eee_list, 2553 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2554 pf, 0, ixl_sysctl_eee_enable, "I", 2555 "Enable Energy Efficient Ethernet (EEE)"); 2556 2557 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", 2558 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0, 2559 "TX LPI status"); 2560 2561 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", 2562 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0, 2563 "RX LPI status"); 2564 2565 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", 2566 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count, 2567 "TX LPI count"); 2568 2569 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", 2570 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count, 2571 "RX LPI count"); 2572 2573 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, 2574 "link_active_on_if_down", 2575 CTLTYPE_INT | CTLFLAG_RWTUN, 2576 pf, 0, ixl_sysctl_set_link_active, "I", 2577 IXL_SYSCTL_HELP_SET_LINK_ACTIVE); 2578 2579 /* Add sysctls meant to print debug information, but don't list them 2580 * in "sysctl -a" output. 
	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
	    "Debug Sysctls");
	debug_list = SYSCTL_CHILDREN(debug_node);

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Shared code debug message level");

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
	    &pf->dbg_mask, 0, "Non-shared code debug message level");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "link_status",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "phy_abilities_init",
	    CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "phy_abilities",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "filter_list",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "hw_res_alloc",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "switch_config",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "switch_vlans",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_key",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_lut",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_hena",
	    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "disable_fw_link_management",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "dump_debug_data",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_pf_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_core_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_global_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "queue_interrupt_table",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "phy_statistics", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_statistics, "A", "PHY Statistics");

	if (pf->has_i2c) {
		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "read_i2c_byte",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);

		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "write_i2c_byte",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);

		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "read_i2c_diag_data",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
	}
}

/*
 * Primarily for finding out how many queues can be assigned to VFs,
 * at runtime.
 */
static int
ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int queues;

	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);

	return sysctl_handle_int(oidp, NULL, queues, req);
}

static const char *
ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
{
	const char * link_speed_str[] = {
		"Unknown",
		"100 Mbps",
		"1 Gbps",
		"10 Gbps",
		"40 Gbps",
		"20 Gbps",
		"25 Gbps",
		"2.5 Gbps",
		"5 Gbps"
	};
	int index;

	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		index = 1;
		break;
	case I40E_LINK_SPEED_1GB:
		index = 2;
		break;
	case I40E_LINK_SPEED_10GB:
		index = 3;
		break;
	case I40E_LINK_SPEED_40GB:
		index = 4;
		break;
	case I40E_LINK_SPEED_20GB:
		index = 5;
		break;
	case I40E_LINK_SPEED_25GB:
		index = 6;
		break;
	case I40E_LINK_SPEED_2_5GB:
		index = 7;
		break;
	case I40E_LINK_SPEED_5GB:
		index = 8;
		break;
	case I40E_LINK_SPEED_UNKNOWN:
	default:
		index = 0;
		break;
	}

	return (link_speed_str[index]);
}

int
ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int error = 0;

	ixl_update_link_status(pf);

	error = sysctl_handle_string(oidp,
	    __DECONST(void *,
	    ixl_link_speed_string(hw->phy.link_info.link_speed)),
	    8, req);

	return (error);
}

/*
 * Converts an 8-bit speeds bitmap between sysctl flags and
 * Admin Queue flags.
 */
static u8
ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
{
#define SPEED_MAP_SIZE 8
	static u16 speedmap[SPEED_MAP_SIZE] = {
		(I40E_LINK_SPEED_100MB | (0x1 << 8)),
		(I40E_LINK_SPEED_1GB | (0x2 << 8)),
		(I40E_LINK_SPEED_10GB | (0x4 << 8)),
		(I40E_LINK_SPEED_20GB | (0x8 << 8)),
		(I40E_LINK_SPEED_25GB | (0x10 << 8)),
		(I40E_LINK_SPEED_40GB | (0x20 << 8)),
		(I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
		(I40E_LINK_SPEED_5GB | (0x80 << 8)),
	};
	u8 retval = 0;

	for (int i = 0; i < SPEED_MAP_SIZE; i++) {
		if (to_aq)
			retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
		else
			retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
	}

	return (retval);
}
int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code aq_error = 0;

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EIO);
	}

	/* Prepare new config */
	bzero(&config, sizeof(config));
	if (from_aq)
		config.link_speed = speeds;
	else
		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
	config.phy_type = abilities.phy_type;
	config.phy_type_ext = abilities.phy_type_ext;
	config.abilities = abilities.abilities
	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
	    & I40E_AQ_PHY_FEC_CONFIG_MASK;

	/* Do aq command & restart link */
	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EIO);
	}

	return (0);
}

/*
** Supported link speeds
** Flags:
**	0x1 - 100 Mb
**	0x2 - 1G
**	0x4 - 10G
**	0x8 - 20G
**	0x10 - 25G
**	0x20 - 40G
**	0x40 - 2.5G
**	0x80 - 5G
*/
static int
ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);

	return sysctl_handle_int(oidp, NULL, supported, req);
}
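/*
 * Editorial worked example for ixl_convert_sysctl_aq_link_speed():
 * sysctl flags 0x6 (0x2 = 1G, 0x4 = 10G) convert with to_aq == true to
 * I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB, and converting that AQ
 * bitmap back with to_aq == false yields 0x6 again.
 */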
" 2908 "Setting advertise speed not supported\n"); 2909 return (EINVAL); 2910 } 2911 2912 /* Error out if bits outside of possible flag range are set */ 2913 if ((requested_ls & ~((u8)0xFF)) != 0) { 2914 device_printf(dev, "Input advertised speed out of range; " 2915 "valid flags are: 0x%02x\n", 2916 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2917 return (EINVAL); 2918 } 2919 2920 /* Check if adapter supports input value */ 2921 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); 2922 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { 2923 device_printf(dev, "Invalid advertised speed; " 2924 "valid flags are: 0x%02x\n", 2925 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2926 return (EINVAL); 2927 } 2928 2929 error = ixl_set_advertised_speeds(pf, requested_ls, false); 2930 if (error) 2931 return (error); 2932 2933 pf->advertised_speed = requested_ls; 2934 ixl_update_link_status(pf); 2935 return (0); 2936 } 2937 2938 /* 2939 * Input: bitmap of enum i40e_aq_link_speed 2940 */ 2941 u64 2942 ixl_max_aq_speed_to_value(u8 link_speeds) 2943 { 2944 if (link_speeds & I40E_LINK_SPEED_40GB) 2945 return IF_Gbps(40); 2946 if (link_speeds & I40E_LINK_SPEED_25GB) 2947 return IF_Gbps(25); 2948 if (link_speeds & I40E_LINK_SPEED_20GB) 2949 return IF_Gbps(20); 2950 if (link_speeds & I40E_LINK_SPEED_10GB) 2951 return IF_Gbps(10); 2952 if (link_speeds & I40E_LINK_SPEED_5GB) 2953 return IF_Gbps(5); 2954 if (link_speeds & I40E_LINK_SPEED_2_5GB) 2955 return IF_Mbps(2500); 2956 if (link_speeds & I40E_LINK_SPEED_1GB) 2957 return IF_Gbps(1); 2958 if (link_speeds & I40E_LINK_SPEED_100MB) 2959 return IF_Mbps(100); 2960 else 2961 /* Minimum supported link speed */ 2962 return IF_Mbps(100); 2963 } 2964 2965 /* 2966 ** Get the width and transaction speed of 2967 ** the bus this adapter is plugged into. 2968 */ 2969 void 2970 ixl_get_bus_info(struct ixl_pf *pf) 2971 { 2972 struct i40e_hw *hw = &pf->hw; 2973 device_t dev = pf->dev; 2974 u16 link; 2975 u32 offset, num_ports; 2976 u64 max_speed; 2977 2978 /* Some devices don't use PCIE */ 2979 if (hw->mac.type == I40E_MAC_X722) 2980 return; 2981 2982 /* Read PCI Express Capabilities Link Status Register */ 2983 pci_find_cap(dev, PCIY_EXPRESS, &offset); 2984 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2985 2986 /* Fill out hw struct with PCIE info */ 2987 i40e_set_pci_config_data(hw, link); 2988 2989 /* Use info to print out bandwidth messages */ 2990 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 2991 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 2992 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 2993 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 2994 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 2995 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 2996 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 2997 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 2998 ("Unknown")); 2999 3000 /* 3001 * If adapter is in slot with maximum supported speed, 3002 * no warning message needs to be printed out. 
/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
*/
void
ixl_get_bus_info(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 link;
	u32 offset, num_ports;
	u64 max_speed;

	/* Some devices don't use PCIE */
	if (hw->mac.type == I40E_MAC_X722)
		return;

	/* Read PCI Express Capabilities Link Status Register */
	pci_find_cap(dev, PCIY_EXPRESS, &offset);
	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);

	/* Fill out hw struct with PCIE info */
	i40e_set_pci_config_data(hw, link);

	/* Use info to print out bandwidth messages */
	device_printf(dev, "PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
	    (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
	    (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
	    (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
	    (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));

	/*
	 * If adapter is in slot with maximum supported speed,
	 * no warning message needs to be printed out.
	 */
	if (hw->bus.speed >= i40e_bus_speed_8000
	    && hw->bus.width >= i40e_bus_width_pcie_x8)
		return;

	num_ports = bitcount32(hw->func_caps.valid_functions);
	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;

	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this device may be insufficient for"
		    " optimal performance.\n");
		device_printf(dev, "Please move the device to a different"
		    " PCI-e link with more lanes and/or higher"
		    " transfer rate.\n");
	}
}

static int
ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	struct sbuf *sbuf;

	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}

void
ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
{
	u8 nvma_ptr = nvma->config & 0xFF;
	u8 nvma_flags = (nvma->config & 0xF00) >> 8;
	const char * cmd_str;

	switch (nvma->command) {
	case I40E_NVM_READ:
		if (nvma_ptr == 0xF && nvma_flags == 0xF &&
		    nvma->offset == 0 && nvma->data_size == 1) {
			device_printf(dev, "NVMUPD: Get Driver Status Command\n");
			return;
		}
		cmd_str = "READ ";
		break;
	case I40E_NVM_WRITE:
		cmd_str = "WRITE";
		break;
	default:
		device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
		return;
	}
	device_printf(dev,
	    "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
	    cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
}
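/*
 * Editorial sketch: ixl_handle_nvmupd_cmd() below is reached through the
 * driver-private ioctl path, where userland wraps an i40e_nvm_access
 * request in a struct ifdrv and issues a SIOCGDRVSPEC/SIOCSDRVSPEC
 * ioctl on a socket.  Rough outline only; error handling is omitted and
 * the ifd_cmd command number is whatever the NVM update tool uses:
 */
#if 0
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <string.h>

static int
example_nvm_request(int sock, struct i40e_nvm_access *nvma, size_t len)
{
	struct ifdrv ifd;

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, "ixl0", sizeof(ifd.ifd_name));
	ifd.ifd_len = len;		/* size of nvma plus its data */
	ifd.ifd_data = nvma;
	return (ioctl(sock, SIOCGDRVSPEC, &ifd));
}
#endif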
int
ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_nvm_access *nvma;
	device_t dev = pf->dev;
	enum i40e_status_code status = 0;
	size_t nvma_size, ifd_len, exp_len;
	int err, perrno;

	DEBUGFUNC("ixl_handle_nvmupd_cmd");

	/* Sanity checks */
	nvma_size = sizeof(struct i40e_nvm_access);
	ifd_len = ifd->ifd_len;

	if (ifd_len < nvma_size ||
	    ifd->ifd_data == NULL) {
		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
		    __func__);
		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
		    __func__, ifd_len, nvma_size);
		device_printf(dev, "%s: data pointer: %p\n", __func__,
		    ifd->ifd_data);
		return (EINVAL);
	}

	nvma = malloc(ifd_len, M_IXL, M_WAITOK);
	err = copyin(ifd->ifd_data, nvma, ifd_len);
	if (err) {
		device_printf(dev, "%s: Cannot get request from user space\n",
		    __func__);
		free(nvma, M_IXL);
		return (err);
	}

	if (pf->dbg_mask & IXL_DBG_NVMUPD)
		ixl_print_nvm_cmd(dev, nvma);

	if (IXL_PF_IS_RESETTING(pf)) {
		int count = 0;
		while (count++ < 100) {
			i40e_msec_delay(100);
			if (!(IXL_PF_IS_RESETTING(pf)))
				break;
		}
	}

	if (IXL_PF_IS_RESETTING(pf)) {
		device_printf(dev,
		    "%s: timeout waiting for EMP reset to finish\n",
		    __func__);
		free(nvma, M_IXL);
		return (-EBUSY);
	}

	if (nvma->data_size < 1 || nvma->data_size > 4096) {
		device_printf(dev,
		    "%s: invalid request, data size not in supported range\n",
		    __func__);
		free(nvma, M_IXL);
		return (EINVAL);
	}

	/*
	 * Older versions of the NVM update tool don't set ifd_len to the size
	 * of the entire buffer passed to the ioctl. Check the data_size field
	 * in the contained i40e_nvm_access struct and ensure everything is
	 * copied in from userspace.
	 */
	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */

	if (ifd_len < exp_len) {
		ifd_len = exp_len;
		nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
		err = copyin(ifd->ifd_data, nvma, ifd_len);
		if (err) {
			device_printf(dev, "%s: Cannot get request from user space\n",
			    __func__);
			free(nvma, M_IXL);
			return (err);
		}
	}

	// TODO: Might need a different lock here
	// IXL_PF_LOCK(pf);
	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
	// IXL_PF_UNLOCK(pf);

	err = copyout(nvma, ifd->ifd_data, ifd_len);
	free(nvma, M_IXL);
	if (err) {
		device_printf(dev, "%s: Cannot return data to user space\n",
		    __func__);
		return (err);
	}

	/* Let the nvmupdate report errors, show them only when debug is enabled */
	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
		    i40e_stat_str(hw, status), perrno);

	/*
	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
	 * to run this ioctl again. So use -EACCES for -EPERM instead.
	 */
	if (perrno == -EPERM)
		return (-EACCES);
	else
		return (perrno);
}

int
ixl_find_i2c_interface(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool i2c_en, port_matched;
	u32 reg;

	for (int i = 0; i < 4; i++) {
		reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
		i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
		port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
		    >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
		    & BIT(hw->port);
		if (i2c_en && port_matched)
			return (i);
	}

	return (-1);
}
void
ixl_set_link(struct ixl_pf *pf, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code aq_error = 0;
	u32 phy_type, phy_type_ext;

	/* Get initial capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}

	phy_type = abilities.phy_type;
	phy_type_ext = abilities.phy_type_ext;

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}

	/* Prepare new config */
	memset(&config, 0, sizeof(config));
	config.link_speed = abilities.link_speed;
	config.abilities = abilities.abilities;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
	config.phy_type = 0;
	config.phy_type_ext = 0;

	config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
	    I40E_AQ_PHY_FLAG_PAUSE_RX);

	switch (pf->fc) {
	case I40E_FC_FULL:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
		    I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	if (enable) {
		config.phy_type = phy_type;
		config.phy_type_ext = phy_type_ext;
	}

	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}

	aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting link config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}
}

static char *
ixl_phy_type_string(u32 bit_pos, bool ext)
{
	static char * phy_types_str[32] = {
		"SGMII",
		"1000BASE-KX",
		"10GBASE-KX4",
		"10GBASE-KR",
		"40GBASE-KR4",
		"XAUI",
		"XFI",
		"SFI",
		"XLAUI",
		"XLPPI",
		"40GBASE-CR4",
		"10GBASE-CR1",
		"SFP+ Active DA",
		"QSFP+ Active DA",
		"Reserved (14)",
		"Reserved (15)",
		"Reserved (16)",
		"100BASE-TX",
		"1000BASE-T",
		"10GBASE-T",
		"10GBASE-SR",
		"10GBASE-LR",
		"10GBASE-SFP+Cu",
		"10GBASE-CR1",
		"40GBASE-CR4",
		"40GBASE-SR4",
		"40GBASE-LR4",
		"1000BASE-SX",
		"1000BASE-LX",
		"1000BASE-T Optical",
		"20GBASE-KR2",
		"Reserved (31)"
	};
	static char * ext_phy_types_str[8] = {
		"25GBASE-KR",
		"25GBASE-CR",
		"25GBASE-SR",
		"25GBASE-LR",
		"25GBASE-AOC",
		"25GBASE-ACC",
		"2.5GBASE-T",
		"5GBASE-T"
	};

	if (ext && bit_pos > 7) return "Invalid_Ext";
	if (bit_pos > 31) return "Invalid";

	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
}
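/*
 * Editorial sketch: decoding an AQ phy_type bitmap with
 * ixl_phy_type_string(), one name per set bit.  This mirrors what
 * ixl_sysctl_phy_abilities() does further below; the helper name is
 * hypothetical.
 */
#if 0
static void
ixl_example_print_phy_types(struct sbuf *buf, u32 phy_type)
{
	for (int i = 0; i < 32; i++)
		if (phy_type & (1u << i))
			sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
}
#endif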
/* TODO: ERJ: I don't think this is necessary anymore. */
int
ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_desc desc;
	enum i40e_status_code status;

	struct i40e_aqc_get_link_status *aq_link_status =
	    (struct i40e_aqc_get_link_status *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}

	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
	return (0);
}

static char *
ixl_phy_type_string_ls(u8 val)
{
	if (val >= 0x1F)
		return ixl_phy_type_string(val - 0x1F, true);
	else
		return ixl_phy_type_string(val, false);
}

static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	struct i40e_aqc_get_link_status link_status;
	error = ixl_aq_get_link_status(pf, &link_status);
	if (error) {
		sbuf_delete(buf);
		return (error);
	}

	sbuf_printf(buf, "\n"
	    "PHY Type : 0x%02x<%s>\n"
	    "Speed    : 0x%02x\n"
	    "Link info: 0x%02x\n"
	    "AN info  : 0x%02x\n"
	    "Ext info : 0x%02x\n"
	    "Loopback : 0x%02x\n"
	    "Max Frame: %d\n"
	    "Config   : 0x%02x\n"
	    "Power    : 0x%02x",
	    link_status.phy_type,
	    ixl_phy_type_string_ls(link_status.phy_type),
	    link_status.link_speed,
	    link_status.link_info,
	    link_status.an_info,
	    link_status.ext_info,
	    link_status.loopback,
	    link_status.max_frame_size,
	    link_status.config,
	    link_status.power_desc);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (error);
}
">"); 3464 } 3465 3466 sbuf_printf(buf, "\nPHY Ext : %02x", 3467 abilities.phy_type_ext); 3468 3469 if (abilities.phy_type_ext != 0) { 3470 sbuf_printf(buf, "<"); 3471 for (int i = 0; i < 4; i++) 3472 if ((1 << i) & abilities.phy_type_ext) 3473 sbuf_printf(buf, "%s,", 3474 ixl_phy_type_string(i, true)); 3475 sbuf_printf(buf, ">"); 3476 } 3477 3478 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 3479 if (abilities.link_speed != 0) { 3480 u8 link_speed; 3481 sbuf_printf(buf, " <"); 3482 for (int i = 0; i < 8; i++) { 3483 link_speed = (1 << i) & abilities.link_speed; 3484 if (link_speed) 3485 sbuf_printf(buf, "%s, ", 3486 ixl_link_speed_string(link_speed)); 3487 } 3488 sbuf_printf(buf, ">"); 3489 } 3490 3491 sbuf_printf(buf, "\n" 3492 "Abilities: %02x\n" 3493 "EEE cap : %04x\n" 3494 "EEER reg : %08x\n" 3495 "D3 Lpan : %02x\n" 3496 "ID : %02x %02x %02x %02x\n" 3497 "ModType : %02x %02x %02x\n" 3498 "ModType E: %01x\n" 3499 "FEC Cfg : %02x\n" 3500 "Ext CC : %02x", 3501 abilities.abilities, abilities.eee_capability, 3502 abilities.eeer_val, abilities.d3_lpan, 3503 abilities.phy_id[0], abilities.phy_id[1], 3504 abilities.phy_id[2], abilities.phy_id[3], 3505 abilities.module_type[0], abilities.module_type[1], 3506 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 3507 abilities.fec_cfg_curr_mod_ext_info & 0x1F, 3508 abilities.ext_comp_code); 3509 3510 error = sbuf_finish(buf); 3511 if (error) 3512 device_printf(dev, "Error finishing sbuf: %d\n", error); 3513 3514 sbuf_delete(buf); 3515 return (error); 3516 } 3517 3518 static int 3519 ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS) 3520 { 3521 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3522 struct i40e_hw *hw = &pf->hw; 3523 device_t dev = pf->dev; 3524 struct sbuf *buf; 3525 int error = 0; 3526 3527 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3528 if (buf == NULL) { 3529 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3530 return (ENOMEM); 3531 } 3532 3533 if (hw->mac.type == I40E_MAC_X722) { 3534 sbuf_printf(buf, "\n" 3535 "PCS Link Control Register: unavailable\n" 3536 "PCS Link Status 1: unavailable\n" 3537 "PCS Link Status 2: unavailable\n" 3538 "XGMII FIFO Status: unavailable\n" 3539 "Auto-Negotiation (AN) Status: unavailable\n" 3540 "KR PCS Status: unavailable\n" 3541 "KR FEC Status 1 – FEC Correctable Blocks Counter: unavailable\n" 3542 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: unavailable" 3543 ); 3544 } else { 3545 sbuf_printf(buf, "\n" 3546 "PCS Link Control Register: %#010X\n" 3547 "PCS Link Status 1: %#010X\n" 3548 "PCS Link Status 2: %#010X\n" 3549 "XGMII FIFO Status: %#010X\n" 3550 "Auto-Negotiation (AN) Status: %#010X\n" 3551 "KR PCS Status: %#010X\n" 3552 "KR FEC Status 1 – FEC Correctable Blocks Counter: %#010X\n" 3553 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: %#010X", 3554 rd32(hw, I40E_PRTMAC_PCS_LINK_CTRL), 3555 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS1(0)), 3556 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS2), 3557 rd32(hw, I40E_PRTMAC_PCS_XGMII_FIFO_STATUS), 3558 rd32(hw, I40E_PRTMAC_PCS_AN_LP_STATUS), 3559 rd32(hw, I40E_PRTMAC_PCS_KR_STATUS), 3560 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS1), 3561 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS2) 3562 ); 3563 } 3564 3565 error = sbuf_finish(buf); 3566 if (error) 3567 device_printf(dev, "Error finishing sbuf: %d\n", error); 3568 3569 sbuf_delete(buf); 3570 return (error); 3571 } 3572 3573 static int 3574 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 3575 { 3576 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3577 struct 
static int
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_mac_filter *f;
	device_t dev = pf->dev;
	int error = 0, ftl_len = 0, ftl_counter = 0;

	struct sbuf *buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	sbuf_printf(buf, "\n");

	/* Print MAC filters */
	sbuf_printf(buf, "PF Filters:\n");
	LIST_FOREACH(f, &vsi->ftl, ftle)
		ftl_len++;

	if (ftl_len < 1)
		sbuf_printf(buf, "(none)\n");
	else {
		LIST_FOREACH(f, &vsi->ftl, ftle) {
			sbuf_printf(buf,
			    MAC_FORMAT ", vlan %4d, flags %#06x",
			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
			/* don't print '\n' for last entry */
			if (++ftl_counter != ftl_len)
				sbuf_printf(buf, "\n");
		}
	}

#ifdef PCI_IOV
	/* TODO: Give each VF its own filter list sysctl */
	struct ixl_vf *vf;
	if (pf->num_vfs > 0) {
		sbuf_printf(buf, "\n\n");
		for (int i = 0; i < pf->num_vfs; i++) {
			vf = &pf->vfs[i];
			if (!(vf->vf_flags & VF_FLAG_ENABLED))
				continue;

			vsi = &vf->vsi;
			ftl_len = 0, ftl_counter = 0;
			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
			LIST_FOREACH(f, &vsi->ftl, ftle)
				ftl_len++;

			if (ftl_len < 1)
				sbuf_printf(buf, "(none)\n");
			else {
				LIST_FOREACH(f, &vsi->ftl, ftle) {
					sbuf_printf(buf,
					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
				}
			}
		}
	}
#endif

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}

#define IXL_SW_RES_SIZE 0x14
int
ixl_res_alloc_cmp(const void *a, const void *b)
{
	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;

	return ((int)one->resource_type - (int)two->resource_type);
}

/*
 * Longest string length: 25
 */
const char *
ixl_switch_res_type_string(u8 type)
{
	static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
		"VEB",
		"VSI",
		"Perfect Match MAC address",
		"S-tag",
		"(Reserved)",
		"Multicast hash entry",
		"Unicast hash entry",
		"VLAN",
		"VSI List entry",
		"(Reserved)",
		"VLAN Statistic Pool",
		"Mirror Rule",
		"Queue Set",
		"Inner VLAN Forward filter",
		"(Reserved)",
		"Inner MAC",
		"IP",
		"GRE/VN1 Key",
		"VN2 Key",
		"Tunneling Port"
	};

	if (type < IXL_SW_RES_SIZE)
		return ixl_switch_res_type_strings[type];
	else
		return "(Reserved)";
}
&num_entries,
	    resp,
	    IXL_SW_RES_SIZE,
	    NULL);
	if (status) {
		device_printf(dev,
		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		return (EIO);
	}

	/* Sort entries by type for display */
	qsort(resp, num_entries,
	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
	    &ixl_res_alloc_cmp);

	sbuf_cat(buf, "\n");
	sbuf_printf(buf, "# of entries: %d\n", num_entries);
	sbuf_printf(buf,
	    "                     Type | Guaranteed | Total |  Used  | Un-allocated\n"
	    "                          |   (this)   | (all) | (this) |    (all)    \n");
	for (int i = 0; i < num_entries; i++) {
		sbuf_printf(buf,
		    "%25s | %10d   %5d   %6d   %12d",
		    ixl_switch_res_type_string(resp[i].resource_type),
		    resp[i].guaranteed,
		    resp[i].total,
		    resp[i].used,
		    resp[i].total_unalloced);
		if (i < num_entries - 1)
			sbuf_cat(buf, "\n");
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (error);
}

enum ixl_sw_seid_offset {
	IXL_SW_SEID_EMP = 1,
	IXL_SW_SEID_MAC_START = 2,
	IXL_SW_SEID_MAC_END = 5,
	IXL_SW_SEID_PF_START = 16,
	IXL_SW_SEID_PF_END = 31,
	IXL_SW_SEID_VF_START = 32,
	IXL_SW_SEID_VF_END = 159,
};

/*
 * Caller must init and delete sbuf; this function will clear and
 * finish it for caller.
 *
 * Note: The SEID argument only applies for elements defined by FW at
 * power-on; these include the EMP, Ports, PFs and VFs.
 */
static char *
ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
{
	sbuf_clear(s);

	/* If SEID is in certain ranges, then we can infer the
	 * mapping of SEID to switch element.
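	 * Per the ixl_sw_seid_offset enum above: SEID 1 is the EMP, SEIDs
	 * 2-5 are MAC ports, 16-31 are PFs, and 32-159 are VFs; any other
	 * SEID falls through to the element_type switch below.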
	 */
	if (seid == IXL_SW_SEID_EMP) {
		sbuf_cat(s, "EMP");
		goto out;
	} else if (seid >= IXL_SW_SEID_MAC_START &&
	    seid <= IXL_SW_SEID_MAC_END) {
		sbuf_printf(s, "MAC %2d",
		    seid - IXL_SW_SEID_MAC_START);
		goto out;
	} else if (seid >= IXL_SW_SEID_PF_START &&
	    seid <= IXL_SW_SEID_PF_END) {
		sbuf_printf(s, "PF %3d",
		    seid - IXL_SW_SEID_PF_START);
		goto out;
	} else if (seid >= IXL_SW_SEID_VF_START &&
	    seid <= IXL_SW_SEID_VF_END) {
		sbuf_printf(s, "VF %3d",
		    seid - IXL_SW_SEID_VF_START);
		goto out;
	}

	switch (element_type) {
	case I40E_AQ_SW_ELEM_TYPE_BMC:
		sbuf_cat(s, "BMC");
		break;
	case I40E_AQ_SW_ELEM_TYPE_PV:
		sbuf_cat(s, "PV");
		break;
	case I40E_AQ_SW_ELEM_TYPE_VEB:
		sbuf_cat(s, "VEB");
		break;
	case I40E_AQ_SW_ELEM_TYPE_PA:
		sbuf_cat(s, "PA");
		break;
	case I40E_AQ_SW_ELEM_TYPE_VSI:
		sbuf_cat(s, "VSI");
		break;
	default:
		sbuf_cat(s, "?");
		break;
	}

out:
	sbuf_finish(s);
	return (sbuf_data(s));
}

static int
ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
{
	const struct i40e_aqc_switch_config_element_resp *one, *two;
	one = (const struct i40e_aqc_switch_config_element_resp *)a;
	two = (const struct i40e_aqc_switch_config_element_resp *)b;

	return ((int)one->seid - (int)two->seid);
}

static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;
	enum i40e_status_code status;
	int error = 0;
	u16 next = 0;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	struct i40e_aqc_switch_config_element_resp *elem;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	status = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (status) {
		device_printf(dev,
		    "%s: aq_get_switch_config() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		return (EIO);
	}
	if (next)
		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
		    __func__, next);

	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}

	/* Sort entries by SEID for display */
	qsort(sw_config->element, sw_config->header.num_reported,
	    sizeof(struct i40e_aqc_switch_config_element_resp),
	    &ixl_sw_cfg_elem_seid_cmp);

	sbuf_cat(buf, "\n");
	/* Assuming <= 255 elements in switch */
	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
	/* Exclude:
	 * Revision -- all elements are revision 1 for now
	 */
	sbuf_printf(buf,
	    "SEID (  Name  ) |   Up (  Name  ) | Down (  Name  ) | Conn Type\n"
	    "                |                 |                 |  (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		elem = &sw_config->element[i];

		// "%4d (%8s) | %8s %8s
%#8x", 3900 sbuf_printf(buf, "%4d", elem->seid); 3901 sbuf_cat(buf, " "); 3902 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3903 elem->element_type, elem->seid)); 3904 sbuf_cat(buf, " | "); 3905 sbuf_printf(buf, "%4d", elem->uplink_seid); 3906 sbuf_cat(buf, " "); 3907 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3908 0, elem->uplink_seid)); 3909 sbuf_cat(buf, " | "); 3910 sbuf_printf(buf, "%4d", elem->downlink_seid); 3911 sbuf_cat(buf, " "); 3912 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3913 0, elem->downlink_seid)); 3914 sbuf_cat(buf, " | "); 3915 sbuf_printf(buf, "%8d", elem->connection_type); 3916 if (i < sw_config->header.num_reported - 1) 3917 sbuf_cat(buf, "\n"); 3918 } 3919 sbuf_delete(nmbuf); 3920 3921 error = sbuf_finish(buf); 3922 if (error) 3923 device_printf(dev, "Error finishing sbuf: %d\n", error); 3924 3925 sbuf_delete(buf); 3926 3927 return (error); 3928 } 3929 3930 static int 3931 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS) 3932 { 3933 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3934 struct i40e_hw *hw = &pf->hw; 3935 device_t dev = pf->dev; 3936 int requested_vlan = -1; 3937 enum i40e_status_code status = 0; 3938 int error = 0; 3939 3940 error = sysctl_handle_int(oidp, &requested_vlan, 0, req); 3941 if ((error) || (req->newptr == NULL)) 3942 return (error); 3943 3944 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) { 3945 device_printf(dev, "Flags disallow setting of vlans\n"); 3946 return (ENODEV); 3947 } 3948 3949 hw->switch_tag = requested_vlan; 3950 device_printf(dev, 3951 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n", 3952 hw->switch_tag, hw->first_tag, hw->second_tag); 3953 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL); 3954 if (status) { 3955 device_printf(dev, 3956 "%s: aq_set_switch_config() error %s, aq error %s\n", 3957 __func__, i40e_stat_str(hw, status), 3958 i40e_aq_str(hw, hw->aq.asq_last_status)); 3959 return (status); 3960 } 3961 return (0); 3962 } 3963 3964 static int 3965 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) 3966 { 3967 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3968 struct i40e_hw *hw = &pf->hw; 3969 device_t dev = pf->dev; 3970 struct sbuf *buf; 3971 int error = 0; 3972 enum i40e_status_code status; 3973 u32 reg; 3974 3975 struct i40e_aqc_get_set_rss_key_data key_data; 3976 3977 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3978 if (!buf) { 3979 device_printf(dev, "Could not allocate sbuf for output.\n"); 3980 return (ENOMEM); 3981 } 3982 3983 bzero(&key_data, sizeof(key_data)); 3984 3985 sbuf_cat(buf, "\n"); 3986 if (hw->mac.type == I40E_MAC_X722) { 3987 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); 3988 if (status) 3989 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", 3990 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3991 } else { 3992 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 3993 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 3994 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); 3995 } 3996 } 3997 3998 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); 3999 4000 error = sbuf_finish(buf); 4001 if (error) 4002 device_printf(dev, "Error finishing sbuf: %d\n", error); 4003 sbuf_delete(buf); 4004 4005 return (error); 4006 } 4007 4008 static void 4009 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) 4010 { 4011 int i, j, k, width; 4012 char c; 4013 4014 if (length < 1 || buf == NULL) return; 4015 4016 int byte_stride = 16; 4017 int lines = length 
/ byte_stride; 4018 int rem = length % byte_stride; 4019 if (rem > 0) 4020 lines++; 4021 4022 for (i = 0; i < lines; i++) { 4023 width = (rem > 0 && i == lines - 1) 4024 ? rem : byte_stride; 4025 4026 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); 4027 4028 for (j = 0; j < width; j++) 4029 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); 4030 4031 if (width < byte_stride) { 4032 for (k = 0; k < (byte_stride - width); k++) 4033 sbuf_printf(sb, " "); 4034 } 4035 4036 if (!text) { 4037 sbuf_printf(sb, "\n"); 4038 continue; 4039 } 4040 4041 for (j = 0; j < width; j++) { 4042 c = (char)buf[i * byte_stride + j]; 4043 if (c < 32 || c > 126) 4044 sbuf_printf(sb, "."); 4045 else 4046 sbuf_printf(sb, "%c", c); 4047 4048 if (j == width - 1) 4049 sbuf_printf(sb, "\n"); 4050 } 4051 } 4052 } 4053 4054 static int 4055 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) 4056 { 4057 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4058 struct i40e_hw *hw = &pf->hw; 4059 device_t dev = pf->dev; 4060 struct sbuf *buf; 4061 int error = 0; 4062 enum i40e_status_code status; 4063 u8 hlut[512]; 4064 u32 reg; 4065 4066 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4067 if (!buf) { 4068 device_printf(dev, "Could not allocate sbuf for output.\n"); 4069 return (ENOMEM); 4070 } 4071 4072 bzero(hlut, sizeof(hlut)); 4073 sbuf_cat(buf, "\n"); 4074 if (hw->mac.type == I40E_MAC_X722) { 4075 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); 4076 if (status) 4077 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", 4078 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 4079 } else { 4080 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { 4081 reg = rd32(hw, I40E_PFQF_HLUT(i)); 4082 bcopy(®, &hlut[i << 2], 4); 4083 } 4084 } 4085 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); 4086 4087 error = sbuf_finish(buf); 4088 if (error) 4089 device_printf(dev, "Error finishing sbuf: %d\n", error); 4090 sbuf_delete(buf); 4091 4092 return (error); 4093 } 4094 4095 static int 4096 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) 4097 { 4098 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4099 struct i40e_hw *hw = &pf->hw; 4100 u64 hena; 4101 4102 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 4103 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 4104 4105 return sysctl_handle_long(oidp, NULL, hena, req); 4106 } 4107 4108 /* 4109 * Sysctl to disable firmware's link management 4110 * 4111 * 1 - Disable link management on this port 4112 * 0 - Re-enable link management 4113 * 4114 * On normal NVMs, firmware manages link by default. 
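 *
 * Example (illustrative; the exact sysctl path depends on where the
 * driver attaches this handler in its debug tree):
 *   # sysctl dev.ixl.0.debug.disable_fw_link_management=1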
4115 */ 4116 static int 4117 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) 4118 { 4119 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4120 struct i40e_hw *hw = &pf->hw; 4121 device_t dev = pf->dev; 4122 int requested_mode = -1; 4123 enum i40e_status_code status = 0; 4124 int error = 0; 4125 4126 /* Read in new mode */ 4127 error = sysctl_handle_int(oidp, &requested_mode, 0, req); 4128 if ((error) || (req->newptr == NULL)) 4129 return (error); 4130 /* Check for sane value */ 4131 if (requested_mode < 0 || requested_mode > 1) { 4132 device_printf(dev, "Valid modes are 0 or 1\n"); 4133 return (EINVAL); 4134 } 4135 4136 /* Set new mode */ 4137 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); 4138 if (status) { 4139 device_printf(dev, 4140 "%s: Error setting new phy debug mode %s," 4141 " aq error: %s\n", __func__, i40e_stat_str(hw, status), 4142 i40e_aq_str(hw, hw->aq.asq_last_status)); 4143 return (EIO); 4144 } 4145 4146 return (0); 4147 } 4148 4149 /* 4150 * Read some diagnostic data from a (Q)SFP+ module 4151 * 4152 * SFP A2 QSFP Lower Page 4153 * Temperature 96-97 22-23 4154 * Vcc 98-99 26-27 4155 * TX power 102-103 34-35..40-41 4156 * RX power 104-105 50-51..56-57 4157 */ 4158 static int 4159 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 4160 { 4161 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4162 device_t dev = pf->dev; 4163 struct sbuf *sbuf; 4164 int error = 0; 4165 u8 output; 4166 4167 if (req->oldptr == NULL) { 4168 error = SYSCTL_OUT(req, 0, 128); 4169 return (0); 4170 } 4171 4172 error = pf->read_i2c_byte(pf, 0, 0xA0, &output); 4173 if (error) { 4174 device_printf(dev, "Error reading from i2c\n"); 4175 return (error); 4176 } 4177 4178 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 4179 if (output == 0x3) { 4180 /* 4181 * Check for: 4182 * - Internally calibrated data 4183 * - Diagnostic monitoring is implemented 4184 */ 4185 pf->read_i2c_byte(pf, 92, 0xA0, &output); 4186 if (!(output & 0x60)) { 4187 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); 4188 return (0); 4189 } 4190 4191 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4192 4193 for (u8 offset = 96; offset < 100; offset++) { 4194 pf->read_i2c_byte(pf, offset, 0xA2, &output); 4195 sbuf_printf(sbuf, "%02X ", output); 4196 } 4197 for (u8 offset = 102; offset < 106; offset++) { 4198 pf->read_i2c_byte(pf, offset, 0xA2, &output); 4199 sbuf_printf(sbuf, "%02X ", output); 4200 } 4201 } else if (output == 0xD || output == 0x11) { 4202 /* 4203 * QSFP+ modules are always internally calibrated, and must indicate 4204 * what types of diagnostic monitoring are implemented 4205 */ 4206 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4207 4208 for (u8 offset = 22; offset < 24; offset++) { 4209 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4210 sbuf_printf(sbuf, "%02X ", output); 4211 } 4212 for (u8 offset = 26; offset < 28; offset++) { 4213 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4214 sbuf_printf(sbuf, "%02X ", output); 4215 } 4216 /* Read the data from the first lane */ 4217 for (u8 offset = 34; offset < 36; offset++) { 4218 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4219 sbuf_printf(sbuf, "%02X ", output); 4220 } 4221 for (u8 offset = 50; offset < 52; offset++) { 4222 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4223 sbuf_printf(sbuf, "%02X ", output); 4224 } 4225 } else { 4226 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); 4227 return (0); 4228 } 4229 4230 sbuf_finish(sbuf); 4231 sbuf_delete(sbuf); 4232 4233 return (0); 4234 } 4235 4236 /* 4237 * 
Sysctl to read a byte from I2C bus. 4238 * 4239 * Input: 32-bit value: 4240 * bits 0-7: device address (0xA0 or 0xA2) 4241 * bits 8-15: offset (0-255) 4242 * bits 16-31: unused 4243 * Output: 8-bit value read 4244 */ 4245 static int 4246 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) 4247 { 4248 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4249 device_t dev = pf->dev; 4250 int input = -1, error = 0; 4251 u8 dev_addr, offset, output; 4252 4253 /* Read in I2C read parameters */ 4254 error = sysctl_handle_int(oidp, &input, 0, req); 4255 if ((error) || (req->newptr == NULL)) 4256 return (error); 4257 /* Validate device address */ 4258 dev_addr = input & 0xFF; 4259 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4260 return (EINVAL); 4261 } 4262 offset = (input >> 8) & 0xFF; 4263 4264 error = pf->read_i2c_byte(pf, offset, dev_addr, &output); 4265 if (error) 4266 return (error); 4267 4268 device_printf(dev, "%02X\n", output); 4269 return (0); 4270 } 4271 4272 /* 4273 * Sysctl to write a byte to the I2C bus. 4274 * 4275 * Input: 32-bit value: 4276 * bits 0-7: device address (0xA0 or 0xA2) 4277 * bits 8-15: offset (0-255) 4278 * bits 16-23: value to write 4279 * bits 24-31: unused 4280 * Output: 8-bit value written 4281 */ 4282 static int 4283 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) 4284 { 4285 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4286 device_t dev = pf->dev; 4287 int input = -1, error = 0; 4288 u8 dev_addr, offset, value; 4289 4290 /* Read in I2C write parameters */ 4291 error = sysctl_handle_int(oidp, &input, 0, req); 4292 if ((error) || (req->newptr == NULL)) 4293 return (error); 4294 /* Validate device address */ 4295 dev_addr = input & 0xFF; 4296 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4297 return (EINVAL); 4298 } 4299 offset = (input >> 8) & 0xFF; 4300 value = (input >> 16) & 0xFF; 4301 4302 error = pf->write_i2c_byte(pf, offset, dev_addr, value); 4303 if (error) 4304 return (error); 4305 4306 device_printf(dev, "%02X written\n", value); 4307 return (0); 4308 } 4309 4310 static int 4311 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4312 u8 bit_pos, int *is_set) 4313 { 4314 device_t dev = pf->dev; 4315 struct i40e_hw *hw = &pf->hw; 4316 enum i40e_status_code status; 4317 4318 if (IXL_PF_IN_RECOVERY_MODE(pf)) 4319 return (EIO); 4320 4321 status = i40e_aq_get_phy_capabilities(hw, 4322 FALSE, FALSE, abilities, NULL); 4323 if (status) { 4324 device_printf(dev, 4325 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 4326 __func__, i40e_stat_str(hw, status), 4327 i40e_aq_str(hw, hw->aq.asq_last_status)); 4328 return (EIO); 4329 } 4330 4331 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); 4332 return (0); 4333 } 4334 4335 static int 4336 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4337 u8 bit_pos, int set) 4338 { 4339 device_t dev = pf->dev; 4340 struct i40e_hw *hw = &pf->hw; 4341 struct i40e_aq_set_phy_config config; 4342 enum i40e_status_code status; 4343 4344 /* Set new PHY config */ 4345 memset(&config, 0, sizeof(config)); 4346 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); 4347 if (set) 4348 config.fec_config |= bit_pos; 4349 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { 4350 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 4351 config.phy_type = abilities->phy_type; 4352 config.phy_type_ext = abilities->phy_type_ext; 4353 config.link_speed = abilities->link_speed; 4354 config.eee_capability = abilities->eee_capability; 4355 
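		/*
		 * The remaining fields are copied verbatim from the reported
		 * abilities so that, apart from the atomic-link flag set
		 * above, only the FEC configuration actually changes.
		 */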
config.eeer = abilities->eeer_val; 4356 config.low_power_ctrl = abilities->d3_lpan; 4357 status = i40e_aq_set_phy_config(hw, &config, NULL); 4358 4359 if (status) { 4360 device_printf(dev, 4361 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", 4362 __func__, i40e_stat_str(hw, status), 4363 i40e_aq_str(hw, hw->aq.asq_last_status)); 4364 return (EIO); 4365 } 4366 } 4367 4368 return (0); 4369 } 4370 4371 static int 4372 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) 4373 { 4374 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4375 int mode, error = 0; 4376 4377 struct i40e_aq_get_phy_abilities_resp abilities; 4378 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); 4379 if (error) 4380 return (error); 4381 /* Read in new mode */ 4382 error = sysctl_handle_int(oidp, &mode, 0, req); 4383 if ((error) || (req->newptr == NULL)) 4384 return (error); 4385 4386 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); 4387 } 4388 4389 static int 4390 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) 4391 { 4392 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4393 int mode, error = 0; 4394 4395 struct i40e_aq_get_phy_abilities_resp abilities; 4396 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); 4397 if (error) 4398 return (error); 4399 /* Read in new mode */ 4400 error = sysctl_handle_int(oidp, &mode, 0, req); 4401 if ((error) || (req->newptr == NULL)) 4402 return (error); 4403 4404 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); 4405 } 4406 4407 static int 4408 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) 4409 { 4410 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4411 int mode, error = 0; 4412 4413 struct i40e_aq_get_phy_abilities_resp abilities; 4414 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); 4415 if (error) 4416 return (error); 4417 /* Read in new mode */ 4418 error = sysctl_handle_int(oidp, &mode, 0, req); 4419 if ((error) || (req->newptr == NULL)) 4420 return (error); 4421 4422 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); 4423 } 4424 4425 static int 4426 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) 4427 { 4428 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4429 int mode, error = 0; 4430 4431 struct i40e_aq_get_phy_abilities_resp abilities; 4432 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); 4433 if (error) 4434 return (error); 4435 /* Read in new mode */ 4436 error = sysctl_handle_int(oidp, &mode, 0, req); 4437 if ((error) || (req->newptr == NULL)) 4438 return (error); 4439 4440 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); 4441 } 4442 4443 static int 4444 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) 4445 { 4446 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4447 int mode, error = 0; 4448 4449 struct i40e_aq_get_phy_abilities_resp abilities; 4450 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); 4451 if (error) 4452 return (error); 4453 /* Read in new mode */ 4454 error = sysctl_handle_int(oidp, &mode, 0, req); 4455 if ((error) || (req->newptr == NULL)) 4456 return (error); 4457 4458 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); 4459 } 4460 4461 static int 4462 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) 4463 { 4464 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4465 struct i40e_hw *hw = &pf->hw; 4466 device_t dev = pf->dev; 4467 struct sbuf *buf; 4468 int error = 0; 4469 enum i40e_status_code status; 4470 4471 buf = 
sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	u8 *final_buff;
	/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
	/* M_ZERO so that a short final row never prints stale heap bytes */
	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT | M_ZERO);
	if (final_buff == NULL) {
		device_printf(dev, "Could not allocate memory for output.\n");
		sbuf_finish(buf);
		sbuf_delete(buf);
		return (ENOMEM);
	}
	int final_buff_len = 0;

	u8 cluster_id = 1;
	bool more = true;

	u8 dump_buf[4096];
	u16 curr_buff_size = 4096;
	u8 curr_next_table = 0;
	u32 curr_next_index = 0;

	u16 ret_buff_size;
	u8 ret_next_table;
	u32 ret_next_index;

	sbuf_cat(buf, "\n");

	while (more) {
		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
		if (status) {
			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			goto free_out;
		}

		/* copy info out of temp buffer */
		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
		final_buff_len += ret_buff_size;

		if (ret_next_table != curr_next_table) {
			/* We're done with the current table; we can dump out read data. */
			sbuf_printf(buf, "%d:", curr_next_table);
			int bytes_printed = 0;
			while (bytes_printed < final_buff_len) {
				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
				bytes_printed += 16;
			}
			sbuf_cat(buf, "\n");

			/* The entire cluster has been read; we're finished */
			if (ret_next_table == 0xFF)
				break;

			/* Otherwise clear the output buffer and continue reading */
			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
			final_buff_len = 0;
		}

		if (ret_next_index == 0xFFFFFFFF)
			ret_next_index = 0;

		bzero(dump_buf, sizeof(dump_buf));
		curr_next_table = ret_next_table;
		curr_next_index = ret_next_index;
	}

free_out:
	free(final_buff, M_IXL);
	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}

static int
ixl_start_fw_lldp(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_aq_start_lldp(hw, false, NULL);
	if (status != I40E_SUCCESS) {
		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EEXIST:
			device_printf(pf->dev,
			    "FW LLDP agent is already running\n");
			break;
		case I40E_AQ_RC_EPERM:
			device_printf(pf->dev,
			    "Device configuration forbids SW from starting "
			    "the LLDP agent. 
Set the \"LLDP Agent\" UEFI HII " 4569 "attribute to \"Enabled\" to use this sysctl\n"); 4570 return (EINVAL); 4571 default: 4572 device_printf(pf->dev, 4573 "Starting FW LLDP agent failed: error: %s, %s\n", 4574 i40e_stat_str(hw, status), 4575 i40e_aq_str(hw, hw->aq.asq_last_status)); 4576 return (EINVAL); 4577 } 4578 } 4579 4580 ixl_clear_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED); 4581 return (0); 4582 } 4583 4584 static int 4585 ixl_stop_fw_lldp(struct ixl_pf *pf) 4586 { 4587 struct i40e_hw *hw = &pf->hw; 4588 device_t dev = pf->dev; 4589 enum i40e_status_code status; 4590 4591 if (hw->func_caps.npar_enable != 0) { 4592 device_printf(dev, 4593 "Disabling FW LLDP agent is not supported on this device\n"); 4594 return (EINVAL); 4595 } 4596 4597 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 4598 device_printf(dev, 4599 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); 4600 return (EINVAL); 4601 } 4602 4603 status = i40e_aq_stop_lldp(hw, true, false, NULL); 4604 if (status != I40E_SUCCESS) { 4605 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 4606 device_printf(dev, 4607 "Disabling FW LLDP agent failed: error: %s, %s\n", 4608 i40e_stat_str(hw, status), 4609 i40e_aq_str(hw, hw->aq.asq_last_status)); 4610 return (EINVAL); 4611 } 4612 4613 device_printf(dev, "FW LLDP agent is already stopped\n"); 4614 } 4615 4616 i40e_aq_set_dcb_parameters(hw, true, NULL); 4617 ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED); 4618 return (0); 4619 } 4620 4621 static int 4622 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 4623 { 4624 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4625 int state, new_state, error = 0; 4626 4627 state = new_state = !ixl_test_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED); 4628 4629 /* Read in new mode */ 4630 error = sysctl_handle_int(oidp, &new_state, 0, req); 4631 if ((error) || (req->newptr == NULL)) 4632 return (error); 4633 4634 /* Already in requested state */ 4635 if (new_state == state) 4636 return (error); 4637 4638 if (new_state == 0) 4639 return ixl_stop_fw_lldp(pf); 4640 4641 return ixl_start_fw_lldp(pf); 4642 } 4643 4644 static int 4645 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4646 { 4647 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4648 int state, new_state; 4649 int sysctl_handle_status = 0; 4650 enum i40e_status_code cmd_status; 4651 4652 /* Init states' values */ 4653 state = new_state = ixl_test_state(&pf->state, IXL_STATE_EEE_ENABLED); 4654 4655 /* Get requested mode */ 4656 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); 4657 if ((sysctl_handle_status) || (req->newptr == NULL)) 4658 return (sysctl_handle_status); 4659 4660 /* Check if state has changed */ 4661 if (new_state == state) 4662 return (0); 4663 4664 /* Set new state */ 4665 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); 4666 4667 /* Save new state or report error */ 4668 if (!cmd_status) { 4669 if (new_state == 0) 4670 ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED); 4671 else 4672 ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED); 4673 } else if (cmd_status == I40E_ERR_CONFIG) 4674 return (EPERM); 4675 else 4676 return (EIO); 4677 4678 return (0); 4679 } 4680 4681 static int 4682 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS) 4683 { 4684 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4685 int error, state; 4686 4687 state = ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 4688 4689 error = sysctl_handle_int(oidp, &state, 0, req); 4690 if ((error) || (req->newptr == NULL)) 4691 return 
(error); 4692 4693 if (state == 0) 4694 ixl_clear_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 4695 else 4696 ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 4697 4698 return (0); 4699 } 4700 4701 4702 int 4703 ixl_attach_get_link_status(struct ixl_pf *pf) 4704 { 4705 struct i40e_hw *hw = &pf->hw; 4706 device_t dev = pf->dev; 4707 enum i40e_status_code status; 4708 4709 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 4710 (hw->aq.fw_maj_ver < 4)) { 4711 i40e_msec_delay(75); 4712 status = i40e_aq_set_link_restart_an(hw, TRUE, NULL); 4713 if (status != I40E_SUCCESS) { 4714 device_printf(dev, 4715 "%s link restart failed status: %s, aq_err=%s\n", 4716 __func__, i40e_stat_str(hw, status), 4717 i40e_aq_str(hw, hw->aq.asq_last_status)); 4718 return (EINVAL); 4719 } 4720 } 4721 4722 /* Determine link state */ 4723 hw->phy.get_link_info = TRUE; 4724 status = i40e_get_link_status(hw, &pf->link_up); 4725 if (status != I40E_SUCCESS) { 4726 device_printf(dev, 4727 "%s get link status, status: %s aq_err=%s\n", 4728 __func__, i40e_stat_str(hw, status), 4729 i40e_aq_str(hw, hw->aq.asq_last_status)); 4730 /* 4731 * Most probably FW has not finished configuring PHY. 4732 * Retry periodically in a timer callback. 4733 */ 4734 ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING); 4735 pf->link_poll_start = getsbinuptime(); 4736 return (EAGAIN); 4737 } 4738 ixl_dbg_link(pf, "%s link_up: %d\n", __func__, pf->link_up); 4739 4740 /* Flow Control mode not set by user, read current FW settings */ 4741 if (pf->fc == -1) 4742 pf->fc = hw->fc.current_mode; 4743 4744 return (0); 4745 } 4746 4747 static int 4748 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS) 4749 { 4750 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4751 int requested = 0, error = 0; 4752 4753 /* Read in new mode */ 4754 error = sysctl_handle_int(oidp, &requested, 0, req); 4755 if ((error) || (req->newptr == NULL)) 4756 return (error); 4757 4758 /* Initiate the PF reset later in the admin task */ 4759 ixl_set_state(&pf->state, IXL_STATE_PF_RESET_REQ); 4760 4761 return (error); 4762 } 4763 4764 static int 4765 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS) 4766 { 4767 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4768 struct i40e_hw *hw = &pf->hw; 4769 int requested = 0, error = 0; 4770 4771 /* Read in new mode */ 4772 error = sysctl_handle_int(oidp, &requested, 0, req); 4773 if ((error) || (req->newptr == NULL)) 4774 return (error); 4775 4776 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 4777 4778 return (error); 4779 } 4780 4781 static int 4782 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS) 4783 { 4784 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4785 struct i40e_hw *hw = &pf->hw; 4786 int requested = 0, error = 0; 4787 4788 /* Read in new mode */ 4789 error = sysctl_handle_int(oidp, &requested, 0, req); 4790 if ((error) || (req->newptr == NULL)) 4791 return (error); 4792 4793 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK); 4794 4795 return (error); 4796 } 4797 4798 /* 4799 * Print out mapping of TX queue indexes and Rx queue indexes 4800 * to MSI-X vectors. 
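 *
 * Example output (vector numbers are illustrative):
 *   (rxq   0): 1
 *   (rxq   1): 2
 *   (txq   0): 1
 *   (txq   1): 2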
 */
static int
ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	struct ixl_rx_queue *rx_que;
	struct ixl_tx_queue *tx_que;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		rx_que = &vsi->rx_queues[i];
		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
	}
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		tx_que = &vsi->tx_queues[i];
		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
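
/*
 * Worked example for the I2C byte sysctls defined earlier in this file
 * (the sysctl paths are assumptions; the encoding follows the handlers'
 * documented bit layout): reading offset 0x60 from device address 0xA2
 * encodes as (0x60 << 8) | 0xA2 = 0x60A2, and writing 0xFF to the same
 * offset encodes as (0xFF << 16) | (0x60 << 8) | 0xA2 = 0xFF60A2:
 *
 *   # sysctl dev.ixl.0.debug.read_i2c_byte=0x60A2
 *   # sysctl dev.ixl.0.debug.write_i2c_byte=0xFF60A2
 */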