1 /****************************************************************************** 2 3 Copyright (c) 2013-2018, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 36 #include "ixl_pf.h" 37 38 #ifdef PCI_IOV 39 #include "ixl_pf_iov.h" 40 #endif 41 42 #ifdef IXL_IW 43 #include "ixl_iw.h" 44 #include "ixl_iw_int.h" 45 #endif 46 47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool); 48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool); 49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed); 50 static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int); 51 static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int); 52 static char * ixl_switch_element_string(struct sbuf *, u8, u16); 53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *); 54 55 /* Sysctls */ 56 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS); 57 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS); 58 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS); 59 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); 60 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); 61 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); 62 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); 63 64 static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS); 65 static int ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS); 66 67 /* Debug Sysctls */ 68 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); 69 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); 70 static int ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS); 71 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); 72 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); 73 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); 74 static int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS); 75 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); 76 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); 77 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); 78 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); 79 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); 80 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); 81 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); 82 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); 83 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); 84 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); 85 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); 86 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS); 87 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS); 88 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); 89 90 /* Debug Sysctls */ 91 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS); 92 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS); 93 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS); 94 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS); 95 #ifdef IXL_DEBUG 96 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); 97 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); 98 #endif 99 100 #ifdef IXL_IW 101 extern int ixl_enable_iwarp; 102 extern int ixl_limit_iwarp_msix; 103 #endif 104 105 static const char * const ixl_fc_string[6] = { 106 "None", 107 "Rx", 108 "Tx", 109 "Full", 110 "Priority", 111 "Default" 112 }; 113 114 static char *ixl_fec_string[3] = { 115 "CL108 RS-FEC", 116 "CL74 FC-FEC/BASE-R", 117 "None" 118 }; 119 120 /* Functions for setting and checking driver state. Note the functions take 121 * bit positions, not bitmasks. The atomic_set_32 and atomic_clear_32 122 * operations require bitmasks. This can easily lead to programming error, so 123 * we provide wrapper functions to avoid this. 124 */ 125 126 /** 127 * ixl_set_state - Set the specified state 128 * @s: the state bitmap 129 * @bit: the state to set 130 * 131 * Atomically update the state bitmap with the specified bit set. 132 */ 133 inline void 134 ixl_set_state(volatile u32 *s, enum ixl_state bit) 135 { 136 /* atomic_set_32 expects a bitmask */ 137 atomic_set_32(s, BIT(bit)); 138 } 139 140 /** 141 * ixl_clear_state - Clear the specified state 142 * @s: the state bitmap 143 * @bit: the state to clear 144 * 145 * Atomically update the state bitmap with the specified bit cleared. 146 */ 147 inline void 148 ixl_clear_state(volatile u32 *s, enum ixl_state bit) 149 { 150 /* atomic_clear_32 expects a bitmask */ 151 atomic_clear_32(s, BIT(bit)); 152 } 153 154 /** 155 * ixl_test_state - Test the specified state 156 * @s: the state bitmap 157 * @bit: the bit to test 158 * 159 * Return true if the state is set, false otherwise. Use this only if the flow 160 * does not need to update the state. If you must update the state as well, 161 * prefer ixl_testandset_state. 162 */ 163 inline bool 164 ixl_test_state(volatile u32 *s, enum ixl_state bit) 165 { 166 return !!(*s & BIT(bit)); 167 } 168 169 /** 170 * ixl_testandset_state - Test and set the specified state 171 * @s: the state bitmap 172 * @bit: the bit to test 173 * 174 * Atomically update the state bitmap, setting the specified bit. Returns the 175 * previous value of the bit. 176 */ 177 inline u32 178 ixl_testandset_state(volatile u32 *s, enum ixl_state bit) 179 { 180 /* atomic_testandset_32 expects a bit position, as opposed to bitmask 181 expected by other atomic functions */ 182 return atomic_testandset_32(s, bit); 183 } 184 185 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); 186 187 /* 188 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string 189 */ 190 void 191 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf) 192 { 193 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24); 194 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF); 195 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF); 196 197 sbuf_printf(buf, 198 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d", 199 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, 200 hw->aq.api_maj_ver, hw->aq.api_min_ver, 201 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> 202 IXL_NVM_VERSION_HI_SHIFT, 203 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> 204 IXL_NVM_VERSION_LO_SHIFT, 205 hw->nvm.eetrack, 206 oem_ver, oem_build, oem_patch); 207 } 208 209 void 210 ixl_print_nvm_version(struct ixl_pf *pf) 211 { 212 struct i40e_hw *hw = &pf->hw; 213 device_t dev = pf->dev; 214 struct sbuf *sbuf; 215 216 sbuf = sbuf_new_auto(); 217 ixl_nvm_version_str(hw, sbuf); 218 sbuf_finish(sbuf); 219 device_printf(dev, "%s\n", sbuf_data(sbuf)); 220 sbuf_delete(sbuf); 221 } 222 223 /** 224 * ixl_get_fw_mode - Check the state of FW 225 * @hw: device hardware structure 226 * 227 * Identify state of FW. It might be in a recovery mode 228 * which limits functionality and requires special handling 229 * from the driver. 230 * 231 * @returns FW mode (normal, recovery, unexpected EMP reset) 232 */ 233 static enum ixl_fw_mode 234 ixl_get_fw_mode(struct ixl_pf *pf) 235 { 236 struct i40e_hw *hw = &pf->hw; 237 enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL; 238 u32 fwsts; 239 240 #ifdef IXL_DEBUG 241 if (pf->recovery_mode) 242 return IXL_FW_MODE_RECOVERY; 243 #endif 244 fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; 245 246 /* Is set and has one of expected values */ 247 if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK && 248 fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) || 249 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || 250 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK) 251 fw_mode = IXL_FW_MODE_RECOVERY; 252 else { 253 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 && 254 fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10) 255 fw_mode = IXL_FW_MODE_UEMPR; 256 } 257 return (fw_mode); 258 } 259 260 /** 261 * ixl_pf_reset - Reset the PF 262 * @pf: PF structure 263 * 264 * Ensure that FW is in the right state and do the reset 265 * if needed. 266 * 267 * @returns zero on success, or an error code on failure. 268 */ 269 int 270 ixl_pf_reset(struct ixl_pf *pf) 271 { 272 struct i40e_hw *hw = &pf->hw; 273 enum i40e_status_code status; 274 enum ixl_fw_mode fw_mode; 275 276 fw_mode = ixl_get_fw_mode(pf); 277 ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode); 278 if (fw_mode == IXL_FW_MODE_RECOVERY) { 279 ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE); 280 /* Don't try to reset device if it's in recovery mode */ 281 return (0); 282 } 283 284 status = i40e_pf_reset(hw); 285 if (status == I40E_SUCCESS) 286 return (0); 287 288 /* Check FW mode again in case it has changed while 289 * waiting for reset to complete */ 290 fw_mode = ixl_get_fw_mode(pf); 291 ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode); 292 if (fw_mode == IXL_FW_MODE_RECOVERY) { 293 ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE); 294 return (0); 295 } 296 297 if (fw_mode == IXL_FW_MODE_UEMPR) 298 device_printf(pf->dev, 299 "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); 300 else 301 device_printf(pf->dev, "PF reset failure %s\n", 302 i40e_stat_str(hw, status)); 303 return (EIO); 304 } 305 306 /** 307 * ixl_setup_hmc - Setup LAN Host Memory Cache 308 * @pf: PF structure 309 * 310 * Init and configure LAN Host Memory Cache 311 * 312 * @returns 0 on success, EIO on error 313 */ 314 int 315 ixl_setup_hmc(struct ixl_pf *pf) 316 { 317 struct i40e_hw *hw = &pf->hw; 318 enum i40e_status_code status; 319 320 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 321 hw->func_caps.num_rx_qp, 0, 0); 322 if (status) { 323 device_printf(pf->dev, "init_lan_hmc failed: %s\n", 324 i40e_stat_str(hw, status)); 325 return (EIO); 326 } 327 328 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 329 if (status) { 330 device_printf(pf->dev, "configure_lan_hmc failed: %s\n", 331 i40e_stat_str(hw, status)); 332 return (EIO); 333 } 334 335 return (0); 336 } 337 338 /** 339 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache 340 * @pf: PF structure 341 * 342 * Shutdown Host Memory Cache if configured. 343 * 344 */ 345 void 346 ixl_shutdown_hmc(struct ixl_pf *pf) 347 { 348 struct i40e_hw *hw = &pf->hw; 349 enum i40e_status_code status; 350 351 /* HMC not configured, no need to shutdown */ 352 if (hw->hmc.hmc_obj == NULL) 353 return; 354 355 status = i40e_shutdown_lan_hmc(hw); 356 if (status) 357 device_printf(pf->dev, 358 "Shutdown LAN HMC failed with code %s\n", 359 i40e_stat_str(hw, status)); 360 } 361 /* 362 * Write PF ITR values to queue ITR registers. 363 */ 364 void 365 ixl_configure_itr(struct ixl_pf *pf) 366 { 367 ixl_configure_tx_itr(pf); 368 ixl_configure_rx_itr(pf); 369 } 370 371 /********************************************************************* 372 * 373 * Get the hardware capabilities 374 * 375 **********************************************************************/ 376 377 int 378 ixl_get_hw_capabilities(struct ixl_pf *pf) 379 { 380 struct i40e_aqc_list_capabilities_element_resp *buf; 381 struct i40e_hw *hw = &pf->hw; 382 device_t dev = pf->dev; 383 enum i40e_status_code status; 384 int len, i2c_intfc_num; 385 bool again = TRUE; 386 u16 needed; 387 388 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 389 hw->func_caps.iwarp = 0; 390 return (0); 391 } 392 393 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 394 retry: 395 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) 396 malloc(len, M_IXL, M_NOWAIT | M_ZERO))) { 397 device_printf(dev, "Unable to allocate cap memory\n"); 398 return (ENOMEM); 399 } 400 401 /* This populates the hw struct */ 402 status = i40e_aq_discover_capabilities(hw, buf, len, 403 &needed, i40e_aqc_opc_list_func_capabilities, NULL); 404 free(buf, M_IXL); 405 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && 406 (again == TRUE)) { 407 /* retry once with a larger buffer */ 408 again = FALSE; 409 len = needed; 410 goto retry; 411 } else if (status != I40E_SUCCESS) { 412 device_printf(dev, "capability discovery failed; status %s, error %s\n", 413 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 414 return (ENODEV); 415 } 416 417 /* 418 * Some devices have both MDIO and I2C; since this isn't reported 419 * by the FW, check registers to see if an I2C interface exists. 420 */ 421 i2c_intfc_num = ixl_find_i2c_interface(pf); 422 if (i2c_intfc_num != -1) 423 pf->has_i2c = true; 424 425 /* Determine functions to use for driver I2C accesses */ 426 switch (pf->i2c_access_method) { 427 case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: { 428 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 429 pf->read_i2c_byte = ixl_read_i2c_byte_aq; 430 pf->write_i2c_byte = ixl_write_i2c_byte_aq; 431 } else { 432 pf->read_i2c_byte = ixl_read_i2c_byte_reg; 433 pf->write_i2c_byte = ixl_write_i2c_byte_reg; 434 } 435 break; 436 } 437 case IXL_I2C_ACCESS_METHOD_AQ: 438 pf->read_i2c_byte = ixl_read_i2c_byte_aq; 439 pf->write_i2c_byte = ixl_write_i2c_byte_aq; 440 break; 441 case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD: 442 pf->read_i2c_byte = ixl_read_i2c_byte_reg; 443 pf->write_i2c_byte = ixl_write_i2c_byte_reg; 444 break; 445 case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS: 446 pf->read_i2c_byte = ixl_read_i2c_byte_bb; 447 pf->write_i2c_byte = ixl_write_i2c_byte_bb; 448 break; 449 default: 450 /* Should not happen */ 451 device_printf(dev, "Error setting I2C access functions\n"); 452 break; 453 } 454 455 /* Keep link active by default */ 456 ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 457 458 /* Print a subset of the capability information. */ 459 device_printf(dev, 460 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n", 461 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, 462 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, 463 (hw->func_caps.mdio_port_mode == 2) ? "I2C" : 464 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" : 465 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" : 466 "MDIO shared"); 467 468 return (0); 469 } 470 471 /* For the set_advertise sysctl */ 472 void 473 ixl_set_initial_advertised_speeds(struct ixl_pf *pf) 474 { 475 device_t dev = pf->dev; 476 int err; 477 478 /* Make sure to initialize the device to the complete list of 479 * supported speeds on driver load, to ensure unloading and 480 * reloading the driver will restore this value. 481 */ 482 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); 483 if (err) { 484 /* Non-fatal error */ 485 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", 486 __func__, err); 487 return; 488 } 489 490 pf->advertised_speed = 491 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 492 } 493 494 int 495 ixl_teardown_hw_structs(struct ixl_pf *pf) 496 { 497 enum i40e_status_code status = 0; 498 struct i40e_hw *hw = &pf->hw; 499 device_t dev = pf->dev; 500 501 /* Shutdown LAN HMC */ 502 if (hw->hmc.hmc_obj) { 503 status = i40e_shutdown_lan_hmc(hw); 504 if (status) { 505 device_printf(dev, 506 "init: LAN HMC shutdown failure; status %s\n", 507 i40e_stat_str(hw, status)); 508 goto err_out; 509 } 510 } 511 512 /* Shutdown admin queue */ 513 ixl_disable_intr0(hw); 514 status = i40e_shutdown_adminq(hw); 515 if (status) 516 device_printf(dev, 517 "init: Admin Queue shutdown failure; status %s\n", 518 i40e_stat_str(hw, status)); 519 520 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag); 521 err_out: 522 return (status); 523 } 524 525 /* 526 ** Creates new filter with given MAC address and VLAN ID 527 */ 528 static struct ixl_mac_filter * 529 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan) 530 { 531 struct ixl_mac_filter *f; 532 533 /* create a new empty filter */ 534 f = malloc(sizeof(struct ixl_mac_filter), 535 M_IXL, M_NOWAIT | M_ZERO); 536 if (f) { 537 LIST_INSERT_HEAD(headp, f, ftle); 538 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); 539 f->vlan = vlan; 540 } 541 542 return (f); 543 } 544 545 /** 546 * ixl_free_filters - Free all filters in given list 547 * headp - pointer to list head 548 * 549 * Frees memory used by each entry in the list. 550 * Does not remove filters from HW. 551 */ 552 void 553 ixl_free_filters(struct ixl_ftl_head *headp) 554 { 555 struct ixl_mac_filter *f, *nf; 556 557 f = LIST_FIRST(headp); 558 while (f != NULL) { 559 nf = LIST_NEXT(f, ftle); 560 free(f, M_IXL); 561 f = nf; 562 } 563 564 LIST_INIT(headp); 565 } 566 567 static u_int 568 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 569 { 570 struct ixl_add_maddr_arg *ama = arg; 571 struct ixl_vsi *vsi = ama->vsi; 572 const u8 *macaddr = (u8*)LLADDR(sdl); 573 struct ixl_mac_filter *f; 574 575 /* Does one already exist */ 576 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY); 577 if (f != NULL) 578 return (0); 579 580 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY); 581 if (f == NULL) { 582 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 583 return (0); 584 } 585 f->flags |= IXL_FILTER_MC; 586 587 return (1); 588 } 589 590 /********************************************************************* 591 * Filter Routines 592 * 593 * Routines for multicast and vlan filter management. 594 * 595 *********************************************************************/ 596 void 597 ixl_add_multi(struct ixl_vsi *vsi) 598 { 599 if_t ifp = vsi->ifp; 600 struct i40e_hw *hw = vsi->hw; 601 int mcnt = 0; 602 struct ixl_add_maddr_arg cb_arg; 603 604 IOCTL_DEBUGOUT("ixl_add_multi: begin"); 605 606 mcnt = if_llmaddr_count(ifp); 607 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { 608 i40e_aq_set_vsi_multicast_promiscuous(hw, 609 vsi->seid, TRUE, NULL); 610 /* delete all existing MC filters */ 611 ixl_del_multi(vsi, true); 612 return; 613 } 614 615 cb_arg.vsi = vsi; 616 LIST_INIT(&cb_arg.to_add); 617 618 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg); 619 if (mcnt > 0) 620 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt); 621 622 IOCTL_DEBUGOUT("ixl_add_multi: end"); 623 } 624 625 static u_int 626 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 627 { 628 struct ixl_mac_filter *f = arg; 629 630 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl))) 631 return (1); 632 else 633 return (0); 634 } 635 636 void 637 ixl_del_multi(struct ixl_vsi *vsi, bool all) 638 { 639 struct ixl_ftl_head to_del; 640 if_t ifp = vsi->ifp; 641 struct ixl_mac_filter *f, *fn; 642 int mcnt = 0; 643 644 IOCTL_DEBUGOUT("ixl_del_multi: begin"); 645 646 LIST_INIT(&to_del); 647 /* Search for removed multicast addresses */ 648 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) { 649 if ((f->flags & IXL_FILTER_MC) == 0 || 650 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0))) 651 continue; 652 653 LIST_REMOVE(f, ftle); 654 LIST_INSERT_HEAD(&to_del, f, ftle); 655 mcnt++; 656 } 657 658 if (mcnt > 0) 659 ixl_del_hw_filters(vsi, &to_del, mcnt); 660 } 661 662 void 663 ixl_link_up_msg(struct ixl_pf *pf) 664 { 665 struct i40e_hw *hw = &pf->hw; 666 if_t ifp = pf->vsi.ifp; 667 char *req_fec_string, *neg_fec_string; 668 u8 fec_abilities; 669 670 fec_abilities = hw->phy.link_info.req_fec_info; 671 /* If both RS and KR are requested, only show RS */ 672 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) 673 req_fec_string = ixl_fec_string[0]; 674 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) 675 req_fec_string = ixl_fec_string[1]; 676 else 677 req_fec_string = ixl_fec_string[2]; 678 679 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) 680 neg_fec_string = ixl_fec_string[0]; 681 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) 682 neg_fec_string = ixl_fec_string[1]; 683 else 684 neg_fec_string = ixl_fec_string[2]; 685 686 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 687 if_name(ifp), 688 ixl_link_speed_string(hw->phy.link_info.link_speed), 689 req_fec_string, neg_fec_string, 690 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", 691 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && 692 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 693 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? 694 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 695 ixl_fc_string[1] : ixl_fc_string[0]); 696 } 697 698 /* 699 * Configure admin queue/misc interrupt cause registers in hardware. 700 */ 701 void 702 ixl_configure_intr0_msix(struct ixl_pf *pf) 703 { 704 struct i40e_hw *hw = &pf->hw; 705 u32 reg; 706 707 /* First set up the adminq - vector 0 */ 708 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 709 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 710 711 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 712 I40E_PFINT_ICR0_ENA_GRST_MASK | 713 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 714 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 715 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 716 I40E_PFINT_ICR0_ENA_VFLR_MASK | 717 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 718 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; 719 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 720 721 /* 722 * 0x7FF is the end of the queue list. 723 * This means we won't use MSI-X vector 0 for a queue interrupt 724 * in MSI-X mode. 725 */ 726 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); 727 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */ 728 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E); 729 730 wr32(hw, I40E_PFINT_DYN_CTL0, 731 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 732 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 733 734 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 735 } 736 737 void 738 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types) 739 { 740 /* Display supported media types */ 741 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX)) 742 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL); 743 744 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T)) 745 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL); 746 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX)) 747 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL); 748 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX)) 749 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL); 750 751 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T)) 752 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL); 753 754 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T)) 755 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL); 756 757 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) || 758 phy_types & (I40E_CAP_PHY_TYPE_XFI) || 759 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU)) 760 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); 761 762 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR)) 763 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL); 764 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR)) 765 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL); 766 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T)) 767 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL); 768 769 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) || 770 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || 771 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) || 772 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) || 773 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) 774 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL); 775 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4)) 776 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL); 777 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4)) 778 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL); 779 780 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX)) 781 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL); 782 783 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) 784 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1)) 785 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL); 786 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC)) 787 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL); 788 if (phy_types & (I40E_CAP_PHY_TYPE_SFI)) 789 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL); 790 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4)) 791 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL); 792 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR)) 793 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL); 794 795 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2)) 796 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL); 797 798 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) 799 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL); 800 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI)) 801 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); 802 803 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR)) 804 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL); 805 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR)) 806 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL); 807 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR)) 808 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL); 809 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR)) 810 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL); 811 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC)) 812 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL); 813 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC)) 814 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL); 815 } 816 817 /********************************************************************* 818 * 819 * Get Firmware Switch configuration 820 * - this will need to be more robust when more complex 821 * switch configurations are enabled. 822 * 823 **********************************************************************/ 824 int 825 ixl_switch_config(struct ixl_pf *pf) 826 { 827 struct i40e_hw *hw = &pf->hw; 828 struct ixl_vsi *vsi = &pf->vsi; 829 device_t dev = iflib_get_dev(vsi->ctx); 830 struct i40e_aqc_get_switch_config_resp *sw_config; 831 u8 aq_buf[I40E_AQ_LARGE_BUF]; 832 int ret; 833 u16 next = 0; 834 835 memset(&aq_buf, 0, sizeof(aq_buf)); 836 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 837 ret = i40e_aq_get_switch_config(hw, sw_config, 838 sizeof(aq_buf), &next, NULL); 839 if (ret) { 840 device_printf(dev, "aq_get_switch_config() failed, error %d," 841 " aq_error %d\n", ret, pf->hw.aq.asq_last_status); 842 return (ret); 843 } 844 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) { 845 device_printf(dev, 846 "Switch config: header reported: %d in structure, %d total\n", 847 LE16_TO_CPU(sw_config->header.num_reported), 848 LE16_TO_CPU(sw_config->header.num_total)); 849 for (int i = 0; 850 i < LE16_TO_CPU(sw_config->header.num_reported); i++) { 851 device_printf(dev, 852 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i, 853 sw_config->element[i].element_type, 854 LE16_TO_CPU(sw_config->element[i].seid), 855 LE16_TO_CPU(sw_config->element[i].uplink_seid), 856 LE16_TO_CPU(sw_config->element[i].downlink_seid)); 857 } 858 } 859 /* Simplified due to a single VSI */ 860 vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid); 861 vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid); 862 vsi->seid = LE16_TO_CPU(sw_config->element[0].seid); 863 return (ret); 864 } 865 866 void 867 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls) 868 { 869 struct sysctl_oid *tree; 870 struct sysctl_oid_list *child; 871 struct sysctl_oid_list *vsi_list; 872 873 tree = device_get_sysctl_tree(vsi->dev); 874 child = SYSCTL_CHILDREN(tree); 875 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name, 876 CTLFLAG_RD, NULL, "VSI Number"); 877 878 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 879 ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats); 880 881 /* Copy of netstat RX errors counter for validation purposes */ 882 SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors", 883 CTLFLAG_RD, &vsi->ierrors, 884 "RX packet errors"); 885 886 if (queues_sysctls) 887 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx); 888 } 889 890 /* 891 * Used to set the Tx ITR value for all of the PF LAN VSI's queues. 892 * Writes to the ITR registers immediately. 893 */ 894 static int 895 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS) 896 { 897 struct ixl_pf *pf = (struct ixl_pf *)arg1; 898 device_t dev = pf->dev; 899 int error = 0; 900 int requested_tx_itr; 901 902 requested_tx_itr = pf->tx_itr; 903 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); 904 if ((error) || (req->newptr == NULL)) 905 return (error); 906 if (pf->dynamic_tx_itr) { 907 device_printf(dev, 908 "Cannot set TX itr value while dynamic TX itr is enabled\n"); 909 return (EINVAL); 910 } 911 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { 912 device_printf(dev, 913 "Invalid TX itr value; value must be between 0 and %d\n", 914 IXL_MAX_ITR); 915 return (EINVAL); 916 } 917 918 pf->tx_itr = requested_tx_itr; 919 ixl_configure_tx_itr(pf); 920 921 return (error); 922 } 923 924 /* 925 * Used to set the Rx ITR value for all of the PF LAN VSI's queues. 926 * Writes to the ITR registers immediately. 927 */ 928 static int 929 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS) 930 { 931 struct ixl_pf *pf = (struct ixl_pf *)arg1; 932 device_t dev = pf->dev; 933 int error = 0; 934 int requested_rx_itr; 935 936 requested_rx_itr = pf->rx_itr; 937 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); 938 if ((error) || (req->newptr == NULL)) 939 return (error); 940 if (pf->dynamic_rx_itr) { 941 device_printf(dev, 942 "Cannot set RX itr value while dynamic RX itr is enabled\n"); 943 return (EINVAL); 944 } 945 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { 946 device_printf(dev, 947 "Invalid RX itr value; value must be between 0 and %d\n", 948 IXL_MAX_ITR); 949 return (EINVAL); 950 } 951 952 pf->rx_itr = requested_rx_itr; 953 ixl_configure_rx_itr(pf); 954 955 return (error); 956 } 957 958 void 959 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, 960 struct sysctl_oid_list *child, 961 struct i40e_hw_port_stats *stats) 962 { 963 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, 964 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics"); 965 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); 966 967 struct i40e_eth_stats *eth_stats = &stats->eth; 968 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); 969 970 struct ixl_sysctl_info ctls[] = 971 { 972 {&stats->crc_errors, "crc_errors", "CRC Errors"}, 973 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, 974 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, 975 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, 976 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"}, 977 /* Packet Reception Stats */ 978 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, 979 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, 980 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, 981 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, 982 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, 983 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, 984 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, 985 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, 986 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, 987 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, 988 {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, 989 {&stats->checksum_error, "checksum_errors", "Checksum Errors"}, 990 /* Packet Transmission Stats */ 991 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, 992 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, 993 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, 994 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, 995 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, 996 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, 997 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, 998 /* Flow control */ 999 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, 1000 {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, 1001 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, 1002 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, 1003 /* End */ 1004 {0,0,0} 1005 }; 1006 1007 struct ixl_sysctl_info *entry = ctls; 1008 while (entry->stat != 0) 1009 { 1010 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, 1011 CTLFLAG_RD, entry->stat, 1012 entry->description); 1013 entry++; 1014 } 1015 } 1016 1017 void 1018 ixl_set_rss_key(struct ixl_pf *pf) 1019 { 1020 struct i40e_hw *hw = &pf->hw; 1021 struct ixl_vsi *vsi = &pf->vsi; 1022 device_t dev = pf->dev; 1023 u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; 1024 enum i40e_status_code status; 1025 1026 #ifdef RSS 1027 /* Fetch the configured RSS key */ 1028 rss_getkey((uint8_t *) &rss_seed); 1029 #else 1030 ixl_get_default_rss_key(rss_seed); 1031 #endif 1032 /* Fill out hash function seed */ 1033 if (hw->mac.type == I40E_MAC_X722) { 1034 struct i40e_aqc_get_set_rss_key_data key_data; 1035 bcopy(rss_seed, &key_data, 52); 1036 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data); 1037 if (status) 1038 device_printf(dev, 1039 "i40e_aq_set_rss_key status %s, error %s\n", 1040 i40e_stat_str(hw, status), 1041 i40e_aq_str(hw, hw->aq.asq_last_status)); 1042 } else { 1043 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) 1044 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]); 1045 } 1046 } 1047 1048 /* 1049 * Configure enabled PCTYPES for RSS. 1050 */ 1051 void 1052 ixl_set_rss_pctypes(struct ixl_pf *pf) 1053 { 1054 struct i40e_hw *hw = &pf->hw; 1055 u64 set_hena = 0, hena; 1056 1057 #ifdef RSS 1058 u32 rss_hash_config; 1059 1060 rss_hash_config = rss_gethashconfig(); 1061 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 1062 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); 1063 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 1064 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); 1065 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 1066 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); 1067 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 1068 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); 1069 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 1070 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); 1071 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 1072 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); 1073 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 1074 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); 1075 #else 1076 if (hw->mac.type == I40E_MAC_X722) 1077 set_hena = IXL_DEFAULT_RSS_HENA_X722; 1078 else 1079 set_hena = IXL_DEFAULT_RSS_HENA_XL710; 1080 #endif 1081 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 1082 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 1083 hena |= set_hena; 1084 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); 1085 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 1086 1087 } 1088 1089 /* 1090 ** Setup the PF's RSS parameters. 1091 */ 1092 void 1093 ixl_config_rss(struct ixl_pf *pf) 1094 { 1095 ixl_set_rss_key(pf); 1096 ixl_set_rss_pctypes(pf); 1097 ixl_set_rss_hlut(pf); 1098 } 1099 1100 /* 1101 * In some firmware versions there is default MAC/VLAN filter 1102 * configured which interferes with filters managed by driver. 1103 * Make sure it's removed. 1104 */ 1105 void 1106 ixl_del_default_hw_filters(struct ixl_vsi *vsi) 1107 { 1108 struct i40e_aqc_remove_macvlan_element_data e; 1109 1110 bzero(&e, sizeof(e)); 1111 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); 1112 e.vlan_tag = 0; 1113 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 1114 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); 1115 1116 bzero(&e, sizeof(e)); 1117 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); 1118 e.vlan_tag = 0; 1119 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | 1120 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 1121 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); 1122 } 1123 1124 /* 1125 ** Initialize filter list and add filters that the hardware 1126 ** needs to know about. 1127 ** 1128 ** Requires VSI's seid to be set before calling. 1129 */ 1130 void 1131 ixl_init_filters(struct ixl_vsi *vsi) 1132 { 1133 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 1134 1135 ixl_dbg_filter(pf, "%s: start\n", __func__); 1136 1137 /* Initialize mac filter list for VSI */ 1138 LIST_INIT(&vsi->ftl); 1139 vsi->num_hw_filters = 0; 1140 1141 /* Receive broadcast Ethernet frames */ 1142 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL); 1143 1144 if (IXL_VSI_IS_VF(vsi)) 1145 return; 1146 1147 ixl_del_default_hw_filters(vsi); 1148 1149 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY); 1150 1151 /* 1152 * Prevent Tx flow control frames from being sent out by 1153 * non-firmware transmitters. 1154 * This affects every VSI in the PF. 1155 */ 1156 #ifndef IXL_DEBUG_FC 1157 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); 1158 #else 1159 if (pf->enable_tx_fc_filter) 1160 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); 1161 #endif 1162 } 1163 1164 void 1165 ixl_reconfigure_filters(struct ixl_vsi *vsi) 1166 { 1167 struct i40e_hw *hw = vsi->hw; 1168 struct ixl_ftl_head tmp; 1169 int cnt; 1170 1171 /* 1172 * The ixl_add_hw_filters function adds filters configured 1173 * in HW to a list in VSI. Move all filters to a temporary 1174 * list to avoid corrupting it by concatenating to itself. 1175 */ 1176 LIST_INIT(&tmp); 1177 LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle); 1178 cnt = vsi->num_hw_filters; 1179 vsi->num_hw_filters = 0; 1180 1181 ixl_add_hw_filters(vsi, &tmp, cnt); 1182 1183 /* 1184 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp 1185 * will be NULL. Furthermore, the ftl of such vsi already contains 1186 * IXL_VLAN_ANY filter so we can skip that as well. 1187 */ 1188 if (hw == NULL) 1189 return; 1190 1191 /* Filter could be removed if MAC address was changed */ 1192 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); 1193 1194 if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0) 1195 return; 1196 /* 1197 * VLAN HW filtering is enabled, make sure that filters 1198 * for all registered VLAN tags are configured 1199 */ 1200 ixl_add_vlan_filters(vsi, hw->mac.addr); 1201 } 1202 1203 /* 1204 * This routine adds a MAC/VLAN filter to the software filter 1205 * list, then adds that new filter to the HW if it doesn't already 1206 * exist in the SW filter list. 1207 */ 1208 void 1209 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 1210 { 1211 struct ixl_mac_filter *f, *tmp; 1212 struct ixl_pf *pf; 1213 device_t dev; 1214 struct ixl_ftl_head to_add; 1215 int to_add_cnt; 1216 1217 pf = vsi->back; 1218 dev = pf->dev; 1219 to_add_cnt = 1; 1220 1221 ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n", 1222 MAC_FORMAT_ARGS(macaddr), vlan); 1223 1224 /* Does one already exist */ 1225 f = ixl_find_filter(&vsi->ftl, macaddr, vlan); 1226 if (f != NULL) 1227 return; 1228 1229 LIST_INIT(&to_add); 1230 f = ixl_new_filter(&to_add, macaddr, vlan); 1231 if (f == NULL) { 1232 device_printf(dev, "WARNING: no filter available!!\n"); 1233 return; 1234 } 1235 if (f->vlan != IXL_VLAN_ANY) 1236 f->flags |= IXL_FILTER_VLAN; 1237 else 1238 vsi->num_macs++; 1239 1240 /* 1241 ** Is this the first vlan being registered, if so we 1242 ** need to remove the ANY filter that indicates we are 1243 ** not in a vlan, and replace that with a 0 filter. 1244 */ 1245 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { 1246 tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY); 1247 if (tmp != NULL) { 1248 struct ixl_ftl_head to_del; 1249 1250 /* Prepare new filter first to avoid removing 1251 * VLAN_ANY filter if allocation fails */ 1252 f = ixl_new_filter(&to_add, macaddr, 0); 1253 if (f == NULL) { 1254 device_printf(dev, "WARNING: no filter available!!\n"); 1255 free(LIST_FIRST(&to_add), M_IXL); 1256 return; 1257 } 1258 to_add_cnt++; 1259 1260 LIST_REMOVE(tmp, ftle); 1261 LIST_INIT(&to_del); 1262 LIST_INSERT_HEAD(&to_del, tmp, ftle); 1263 ixl_del_hw_filters(vsi, &to_del, 1); 1264 } 1265 } 1266 1267 ixl_add_hw_filters(vsi, &to_add, to_add_cnt); 1268 } 1269 1270 /** 1271 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs 1272 * @vsi: pointer to VSI 1273 * @macaddr: MAC address 1274 * 1275 * Adds MAC/VLAN filter for each VLAN configured on the interface 1276 * if there is enough HW filters. Otherwise adds a single filter 1277 * for all tagged and untagged frames to allow all configured VLANs 1278 * to recieve traffic. 1279 */ 1280 void 1281 ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr) 1282 { 1283 struct ixl_ftl_head to_add; 1284 struct ixl_mac_filter *f; 1285 int to_add_cnt = 0; 1286 int i, vlan = 0; 1287 1288 if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) { 1289 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); 1290 return; 1291 } 1292 LIST_INIT(&to_add); 1293 1294 /* Add filter for untagged frames if it does not exist yet */ 1295 f = ixl_find_filter(&vsi->ftl, macaddr, 0); 1296 if (f == NULL) { 1297 f = ixl_new_filter(&to_add, macaddr, 0); 1298 if (f == NULL) { 1299 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 1300 return; 1301 } 1302 to_add_cnt++; 1303 } 1304 1305 for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) { 1306 bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan); 1307 if (vlan == -1) 1308 break; 1309 1310 /* Does one already exist */ 1311 f = ixl_find_filter(&vsi->ftl, macaddr, vlan); 1312 if (f != NULL) 1313 continue; 1314 1315 f = ixl_new_filter(&to_add, macaddr, vlan); 1316 if (f == NULL) { 1317 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 1318 ixl_free_filters(&to_add); 1319 return; 1320 } 1321 to_add_cnt++; 1322 } 1323 1324 ixl_add_hw_filters(vsi, &to_add, to_add_cnt); 1325 } 1326 1327 void 1328 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 1329 { 1330 struct ixl_mac_filter *f, *tmp; 1331 struct ixl_ftl_head ftl_head; 1332 int to_del_cnt = 1; 1333 1334 ixl_dbg_filter((struct ixl_pf *)vsi->back, 1335 "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n", 1336 MAC_FORMAT_ARGS(macaddr), vlan); 1337 1338 f = ixl_find_filter(&vsi->ftl, macaddr, vlan); 1339 if (f == NULL) 1340 return; 1341 1342 LIST_REMOVE(f, ftle); 1343 LIST_INIT(&ftl_head); 1344 LIST_INSERT_HEAD(&ftl_head, f, ftle); 1345 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0) 1346 vsi->num_macs--; 1347 1348 /* If this is not the last vlan just remove the filter */ 1349 if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) { 1350 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt); 1351 return; 1352 } 1353 1354 /* It's the last vlan, we need to switch back to a non-vlan filter */ 1355 tmp = ixl_find_filter(&vsi->ftl, macaddr, 0); 1356 if (tmp != NULL) { 1357 LIST_REMOVE(tmp, ftle); 1358 LIST_INSERT_AFTER(f, tmp, ftle); 1359 to_del_cnt++; 1360 } 1361 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt); 1362 1363 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); 1364 } 1365 1366 /** 1367 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC 1368 * @vsi: VSI which filters need to be removed 1369 * @macaddr: MAC address 1370 * 1371 * Remove all MAC/VLAN filters with a given MAC address. For multicast 1372 * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY) 1373 * so skip them to speed up processing. Those filters should be removed 1374 * using ixl_del_filter function. 1375 */ 1376 void 1377 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr) 1378 { 1379 struct ixl_mac_filter *f, *tmp; 1380 struct ixl_ftl_head to_del; 1381 int to_del_cnt = 0; 1382 1383 LIST_INIT(&to_del); 1384 1385 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) { 1386 if ((f->flags & IXL_FILTER_MC) != 0 || 1387 !ixl_ether_is_equal(f->macaddr, macaddr)) 1388 continue; 1389 1390 LIST_REMOVE(f, ftle); 1391 LIST_INSERT_HEAD(&to_del, f, ftle); 1392 to_del_cnt++; 1393 } 1394 1395 ixl_dbg_filter((struct ixl_pf *)vsi->back, 1396 "%s: " MAC_FORMAT ", to_del_cnt: %d\n", 1397 __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt); 1398 if (to_del_cnt > 0) 1399 ixl_del_hw_filters(vsi, &to_del, to_del_cnt); 1400 } 1401 1402 /* 1403 ** Find the filter with both matching mac addr and vlan id 1404 */ 1405 struct ixl_mac_filter * 1406 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan) 1407 { 1408 struct ixl_mac_filter *f; 1409 1410 LIST_FOREACH(f, headp, ftle) { 1411 if (ixl_ether_is_equal(f->macaddr, macaddr) && 1412 (f->vlan == vlan)) { 1413 return (f); 1414 } 1415 } 1416 1417 return (NULL); 1418 } 1419 1420 /* 1421 ** This routine takes additions to the vsi filter 1422 ** table and creates an Admin Queue call to create 1423 ** the filters in the hardware. 1424 */ 1425 void 1426 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt) 1427 { 1428 struct i40e_aqc_add_macvlan_element_data *a, *b; 1429 struct ixl_mac_filter *f, *fn; 1430 struct ixl_pf *pf; 1431 struct i40e_hw *hw; 1432 device_t dev; 1433 enum i40e_status_code status; 1434 int j = 0; 1435 1436 pf = vsi->back; 1437 dev = vsi->dev; 1438 hw = &pf->hw; 1439 1440 ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt); 1441 1442 if (cnt < 1) { 1443 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n"); 1444 return; 1445 } 1446 1447 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, 1448 M_IXL, M_NOWAIT | M_ZERO); 1449 if (a == NULL) { 1450 device_printf(dev, "add_hw_filters failed to get memory\n"); 1451 return; 1452 } 1453 1454 LIST_FOREACH(f, to_add, ftle) { 1455 b = &a[j]; // a pox on fvl long names :) 1456 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); 1457 if (f->vlan == IXL_VLAN_ANY) { 1458 b->vlan_tag = 0; 1459 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; 1460 } else { 1461 b->vlan_tag = f->vlan; 1462 b->flags = 0; 1463 } 1464 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 1465 /* Some FW versions do not set match method 1466 * when adding filters fails. Initialize it with 1467 * expected error value to allow detection which 1468 * filters were not added */ 1469 b->match_method = I40E_AQC_MM_ERR_NO_RES; 1470 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n", 1471 MAC_FORMAT_ARGS(f->macaddr)); 1472 1473 if (++j == cnt) 1474 break; 1475 } 1476 if (j != cnt) { 1477 /* Something went wrong */ 1478 device_printf(dev, 1479 "%s ERROR: list of filters to short expected: %d, found: %d\n", 1480 __func__, cnt, j); 1481 ixl_free_filters(to_add); 1482 goto out_free; 1483 } 1484 1485 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); 1486 if (status == I40E_SUCCESS) { 1487 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle); 1488 vsi->num_hw_filters += j; 1489 goto out_free; 1490 } 1491 1492 device_printf(dev, 1493 "i40e_aq_add_macvlan status %s, error %s\n", 1494 i40e_stat_str(hw, status), 1495 i40e_aq_str(hw, hw->aq.asq_last_status)); 1496 j = 0; 1497 1498 /* Verify which filters were actually configured in HW 1499 * and add them to the list */ 1500 LIST_FOREACH_SAFE(f, to_add, ftle, fn) { 1501 LIST_REMOVE(f, ftle); 1502 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) { 1503 ixl_dbg_filter(pf, 1504 "%s filter " MAC_FORMAT " VTAG: %d not added\n", 1505 __func__, 1506 MAC_FORMAT_ARGS(f->macaddr), 1507 f->vlan); 1508 free(f, M_IXL); 1509 } else { 1510 LIST_INSERT_HEAD(&vsi->ftl, f, ftle); 1511 vsi->num_hw_filters++; 1512 } 1513 j++; 1514 } 1515 1516 out_free: 1517 free(a, M_IXL); 1518 } 1519 1520 /* 1521 ** This routine takes removals in the vsi filter 1522 ** table and creates an Admin Queue call to delete 1523 ** the filters in the hardware. 1524 */ 1525 void 1526 ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt) 1527 { 1528 struct i40e_aqc_remove_macvlan_element_data *d, *e; 1529 struct ixl_pf *pf; 1530 struct i40e_hw *hw; 1531 device_t dev; 1532 struct ixl_mac_filter *f, *f_temp; 1533 enum i40e_status_code status; 1534 int j = 0; 1535 1536 pf = vsi->back; 1537 hw = &pf->hw; 1538 dev = vsi->dev; 1539 1540 ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt); 1541 1542 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, 1543 M_IXL, M_NOWAIT | M_ZERO); 1544 if (d == NULL) { 1545 device_printf(dev, "%s: failed to get memory\n", __func__); 1546 return; 1547 } 1548 1549 LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) { 1550 e = &d[j]; // a pox on fvl long names :) 1551 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); 1552 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 1553 if (f->vlan == IXL_VLAN_ANY) { 1554 e->vlan_tag = 0; 1555 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 1556 } else { 1557 e->vlan_tag = f->vlan; 1558 } 1559 1560 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n", 1561 MAC_FORMAT_ARGS(f->macaddr)); 1562 1563 /* delete entry from the list */ 1564 LIST_REMOVE(f, ftle); 1565 free(f, M_IXL); 1566 if (++j == cnt) 1567 break; 1568 } 1569 if (j != cnt || !LIST_EMPTY(to_del)) { 1570 /* Something went wrong */ 1571 device_printf(dev, 1572 "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n", 1573 __func__, cnt, j); 1574 ixl_free_filters(to_del); 1575 goto out_free; 1576 } 1577 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); 1578 if (status) { 1579 device_printf(dev, 1580 "%s: i40e_aq_remove_macvlan status %s, error %s\n", 1581 __func__, i40e_stat_str(hw, status), 1582 i40e_aq_str(hw, hw->aq.asq_last_status)); 1583 for (int i = 0; i < j; i++) { 1584 if (d[i].error_code == 0) 1585 continue; 1586 device_printf(dev, 1587 "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n", 1588 __func__, MAC_FORMAT_ARGS(d[i].mac_addr), 1589 d[i].vlan_tag); 1590 } 1591 } 1592 1593 vsi->num_hw_filters -= j; 1594 1595 out_free: 1596 free(d, M_IXL); 1597 1598 ixl_dbg_filter(pf, "%s: end\n", __func__); 1599 } 1600 1601 int 1602 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1603 { 1604 struct i40e_hw *hw = &pf->hw; 1605 int error = 0; 1606 u32 reg; 1607 u16 pf_qidx; 1608 1609 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1610 1611 ixl_dbg(pf, IXL_DBG_EN_DIS, 1612 "Enabling PF TX ring %4d / VSI TX ring %4d...\n", 1613 pf_qidx, vsi_qidx); 1614 1615 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE); 1616 1617 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1618 reg |= I40E_QTX_ENA_QENA_REQ_MASK | 1619 I40E_QTX_ENA_QENA_STAT_MASK; 1620 wr32(hw, I40E_QTX_ENA(pf_qidx), reg); 1621 /* Verify the enable took */ 1622 for (int j = 0; j < 10; j++) { 1623 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1624 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) 1625 break; 1626 i40e_usec_delay(10); 1627 } 1628 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { 1629 device_printf(pf->dev, "TX queue %d still disabled!\n", 1630 pf_qidx); 1631 error = ETIMEDOUT; 1632 } 1633 1634 return (error); 1635 } 1636 1637 int 1638 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1639 { 1640 struct i40e_hw *hw = &pf->hw; 1641 int error = 0; 1642 u32 reg; 1643 u16 pf_qidx; 1644 1645 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1646 1647 ixl_dbg(pf, IXL_DBG_EN_DIS, 1648 "Enabling PF RX ring %4d / VSI RX ring %4d...\n", 1649 pf_qidx, vsi_qidx); 1650 1651 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1652 reg |= I40E_QRX_ENA_QENA_REQ_MASK | 1653 I40E_QRX_ENA_QENA_STAT_MASK; 1654 wr32(hw, I40E_QRX_ENA(pf_qidx), reg); 1655 /* Verify the enable took */ 1656 for (int j = 0; j < 10; j++) { 1657 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1658 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) 1659 break; 1660 i40e_usec_delay(10); 1661 } 1662 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { 1663 device_printf(pf->dev, "RX queue %d still disabled!\n", 1664 pf_qidx); 1665 error = ETIMEDOUT; 1666 } 1667 1668 return (error); 1669 } 1670 1671 int 1672 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1673 { 1674 int error = 0; 1675 1676 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx); 1677 /* Called function already prints error message */ 1678 if (error) 1679 return (error); 1680 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx); 1681 return (error); 1682 } 1683 1684 /* 1685 * Returns error on first ring that is detected hung. 1686 */ 1687 int 1688 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1689 { 1690 struct i40e_hw *hw = &pf->hw; 1691 int error = 0; 1692 u32 reg; 1693 u16 pf_qidx; 1694 1695 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1696 1697 ixl_dbg(pf, IXL_DBG_EN_DIS, 1698 "Disabling PF TX ring %4d / VSI TX ring %4d...\n", 1699 pf_qidx, vsi_qidx); 1700 1701 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE); 1702 i40e_usec_delay(500); 1703 1704 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1705 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 1706 wr32(hw, I40E_QTX_ENA(pf_qidx), reg); 1707 /* Verify the disable took */ 1708 for (int j = 0; j < 10; j++) { 1709 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1710 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) 1711 break; 1712 i40e_msec_delay(10); 1713 } 1714 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { 1715 device_printf(pf->dev, "TX queue %d still enabled!\n", 1716 pf_qidx); 1717 error = ETIMEDOUT; 1718 } 1719 1720 return (error); 1721 } 1722 1723 /* 1724 * Returns error on first ring that is detected hung. 1725 */ 1726 int 1727 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1728 { 1729 struct i40e_hw *hw = &pf->hw; 1730 int error = 0; 1731 u32 reg; 1732 u16 pf_qidx; 1733 1734 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1735 1736 ixl_dbg(pf, IXL_DBG_EN_DIS, 1737 "Disabling PF RX ring %4d / VSI RX ring %4d...\n", 1738 pf_qidx, vsi_qidx); 1739 1740 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1741 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; 1742 wr32(hw, I40E_QRX_ENA(pf_qidx), reg); 1743 /* Verify the disable took */ 1744 for (int j = 0; j < 10; j++) { 1745 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1746 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) 1747 break; 1748 i40e_msec_delay(10); 1749 } 1750 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { 1751 device_printf(pf->dev, "RX queue %d still enabled!\n", 1752 pf_qidx); 1753 error = ETIMEDOUT; 1754 } 1755 1756 return (error); 1757 } 1758 1759 int 1760 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1761 { 1762 int error = 0; 1763 1764 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx); 1765 /* Called function already prints error message */ 1766 if (error) 1767 return (error); 1768 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx); 1769 return (error); 1770 } 1771 1772 static void 1773 ixl_handle_tx_mdd_event(struct ixl_pf *pf) 1774 { 1775 struct i40e_hw *hw = &pf->hw; 1776 device_t dev = pf->dev; 1777 struct ixl_vf *vf; 1778 bool mdd_detected = false; 1779 bool pf_mdd_detected = false; 1780 bool vf_mdd_detected = false; 1781 u16 vf_num, queue; 1782 u8 pf_num, event; 1783 u8 pf_mdet_num, vp_mdet_num; 1784 u32 reg; 1785 1786 /* find what triggered the MDD event */ 1787 reg = rd32(hw, I40E_GL_MDET_TX); 1788 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 1789 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 1790 I40E_GL_MDET_TX_PF_NUM_SHIFT; 1791 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 1792 I40E_GL_MDET_TX_VF_NUM_SHIFT; 1793 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 1794 I40E_GL_MDET_TX_EVENT_SHIFT; 1795 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 1796 I40E_GL_MDET_TX_QUEUE_SHIFT; 1797 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 1798 mdd_detected = true; 1799 } 1800 1801 if (!mdd_detected) 1802 return; 1803 1804 reg = rd32(hw, I40E_PF_MDET_TX); 1805 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 1806 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 1807 pf_mdet_num = hw->pf_id; 1808 pf_mdd_detected = true; 1809 } 1810 1811 /* Check if MDD was caused by a VF */ 1812 for (int i = 0; i < pf->num_vfs; i++) { 1813 vf = &(pf->vfs[i]); 1814 reg = rd32(hw, I40E_VP_MDET_TX(i)); 1815 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 1816 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 1817 vp_mdet_num = i; 1818 vf->num_mdd_events++; 1819 vf_mdd_detected = true; 1820 } 1821 } 1822 1823 /* Print out an error message */ 1824 if (vf_mdd_detected && pf_mdd_detected) 1825 device_printf(dev, 1826 "Malicious Driver Detection event %d" 1827 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n", 1828 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num); 1829 else if (vf_mdd_detected && !pf_mdd_detected) 1830 device_printf(dev, 1831 "Malicious Driver Detection event %d" 1832 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n", 1833 event, queue, pf_num, vf_num, vp_mdet_num); 1834 else if (!vf_mdd_detected && pf_mdd_detected) 1835 device_printf(dev, 1836 "Malicious Driver Detection event %d" 1837 " on TX queue %d, pf number %d (PF-%d)\n", 1838 event, queue, pf_num, pf_mdet_num); 1839 /* Theoretically shouldn't happen */ 1840 else 1841 device_printf(dev, 1842 "TX Malicious Driver Detection event (unknown)\n"); 1843 } 1844 1845 static void 1846 ixl_handle_rx_mdd_event(struct ixl_pf *pf) 1847 { 1848 struct i40e_hw *hw = &pf->hw; 1849 device_t dev = pf->dev; 1850 struct ixl_vf *vf; 1851 bool mdd_detected = false; 1852 bool pf_mdd_detected = false; 1853 bool vf_mdd_detected = false; 1854 u16 queue; 1855 u8 pf_num, event; 1856 u8 pf_mdet_num, vp_mdet_num; 1857 u32 reg; 1858 1859 /* 1860 * GL_MDET_RX doesn't contain VF number information, unlike 1861 * GL_MDET_TX. 1862 */ 1863 reg = rd32(hw, I40E_GL_MDET_RX); 1864 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 1865 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 1866 I40E_GL_MDET_RX_FUNCTION_SHIFT; 1867 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 1868 I40E_GL_MDET_RX_EVENT_SHIFT; 1869 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 1870 I40E_GL_MDET_RX_QUEUE_SHIFT; 1871 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 1872 mdd_detected = true; 1873 } 1874 1875 if (!mdd_detected) 1876 return; 1877 1878 reg = rd32(hw, I40E_PF_MDET_RX); 1879 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 1880 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 1881 pf_mdet_num = hw->pf_id; 1882 pf_mdd_detected = true; 1883 } 1884 1885 /* Check if MDD was caused by a VF */ 1886 for (int i = 0; i < pf->num_vfs; i++) { 1887 vf = &(pf->vfs[i]); 1888 reg = rd32(hw, I40E_VP_MDET_RX(i)); 1889 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 1890 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 1891 vp_mdet_num = i; 1892 vf->num_mdd_events++; 1893 vf_mdd_detected = true; 1894 } 1895 } 1896 1897 /* Print out an error message */ 1898 if (vf_mdd_detected && pf_mdd_detected) 1899 device_printf(dev, 1900 "Malicious Driver Detection event %d" 1901 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n", 1902 event, queue, pf_num, pf_mdet_num, vp_mdet_num); 1903 else if (vf_mdd_detected && !pf_mdd_detected) 1904 device_printf(dev, 1905 "Malicious Driver Detection event %d" 1906 " on RX queue %d, pf number %d, (VF-%d)\n", 1907 event, queue, pf_num, vp_mdet_num); 1908 else if (!vf_mdd_detected && pf_mdd_detected) 1909 device_printf(dev, 1910 "Malicious Driver Detection event %d" 1911 " on RX queue %d, pf number %d (PF-%d)\n", 1912 event, queue, pf_num, pf_mdet_num); 1913 /* Theoretically shouldn't happen */ 1914 else 1915 device_printf(dev, 1916 "RX Malicious Driver Detection event (unknown)\n"); 1917 } 1918 1919 /** 1920 * ixl_handle_mdd_event 1921 * 1922 * Called from interrupt handler to identify possibly malicious vfs 1923 * (But also detects events from the PF, as well) 1924 **/ 1925 void 1926 ixl_handle_mdd_event(struct ixl_pf *pf) 1927 { 1928 struct i40e_hw *hw = &pf->hw; 1929 u32 reg; 1930 1931 /* 1932 * Handle both TX/RX because it's possible they could 1933 * both trigger in the same interrupt. 1934 */ 1935 ixl_handle_tx_mdd_event(pf); 1936 ixl_handle_rx_mdd_event(pf); 1937 1938 ixl_clear_state(&pf->state, IXL_STATE_MDD_PENDING); 1939 1940 /* re-enable mdd interrupt cause */ 1941 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 1942 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 1943 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 1944 ixl_flush(hw); 1945 } 1946 1947 void 1948 ixl_enable_intr0(struct i40e_hw *hw) 1949 { 1950 u32 reg; 1951 1952 /* Use IXL_ITR_NONE so ITR isn't updated here */ 1953 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | 1954 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 1955 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 1956 wr32(hw, I40E_PFINT_DYN_CTL0, reg); 1957 } 1958 1959 void 1960 ixl_disable_intr0(struct i40e_hw *hw) 1961 { 1962 u32 reg; 1963 1964 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; 1965 wr32(hw, I40E_PFINT_DYN_CTL0, reg); 1966 ixl_flush(hw); 1967 } 1968 1969 void 1970 ixl_enable_queue(struct i40e_hw *hw, int id) 1971 { 1972 u32 reg; 1973 1974 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | 1975 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 1976 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 1977 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); 1978 } 1979 1980 void 1981 ixl_disable_queue(struct i40e_hw *hw, int id) 1982 { 1983 u32 reg; 1984 1985 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 1986 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); 1987 } 1988 1989 void 1990 ixl_handle_empr_reset(struct ixl_pf *pf) 1991 { 1992 struct ixl_vsi *vsi = &pf->vsi; 1993 bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING); 1994 1995 ixl_prepare_for_reset(pf, is_up); 1996 /* 1997 * i40e_pf_reset checks the type of reset and acts 1998 * accordingly. If EMP or Core reset was performed 1999 * doing PF reset is not necessary and it sometimes 2000 * fails. 2001 */ 2002 ixl_pf_reset(pf); 2003 2004 if (!IXL_PF_IN_RECOVERY_MODE(pf) && 2005 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) { 2006 ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE); 2007 device_printf(pf->dev, 2008 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 2009 pf->link_up = FALSE; 2010 ixl_update_link_status(pf); 2011 } 2012 2013 ixl_rebuild_hw_structs_after_reset(pf, is_up); 2014 2015 ixl_clear_state(&pf->state, IXL_STATE_RESETTING); 2016 } 2017 2018 void 2019 ixl_update_stats_counters(struct ixl_pf *pf) 2020 { 2021 struct i40e_hw *hw = &pf->hw; 2022 struct ixl_vsi *vsi = &pf->vsi; 2023 struct ixl_vf *vf; 2024 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx; 2025 2026 struct i40e_hw_port_stats *nsd = &pf->stats; 2027 struct i40e_hw_port_stats *osd = &pf->stats_offsets; 2028 2029 /* Update hw stats */ 2030 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), 2031 pf->stat_offsets_loaded, 2032 &osd->crc_errors, &nsd->crc_errors); 2033 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), 2034 pf->stat_offsets_loaded, 2035 &osd->illegal_bytes, &nsd->illegal_bytes); 2036 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), 2037 I40E_GLPRT_GORCL(hw->port), 2038 pf->stat_offsets_loaded, 2039 &osd->eth.rx_bytes, &nsd->eth.rx_bytes); 2040 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), 2041 I40E_GLPRT_GOTCL(hw->port), 2042 pf->stat_offsets_loaded, 2043 &osd->eth.tx_bytes, &nsd->eth.tx_bytes); 2044 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), 2045 pf->stat_offsets_loaded, 2046 &osd->eth.rx_discards, 2047 &nsd->eth.rx_discards); 2048 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), 2049 I40E_GLPRT_UPRCL(hw->port), 2050 pf->stat_offsets_loaded, 2051 &osd->eth.rx_unicast, 2052 &nsd->eth.rx_unicast); 2053 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), 2054 I40E_GLPRT_UPTCL(hw->port), 2055 pf->stat_offsets_loaded, 2056 &osd->eth.tx_unicast, 2057 &nsd->eth.tx_unicast); 2058 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), 2059 I40E_GLPRT_MPRCL(hw->port), 2060 pf->stat_offsets_loaded, 2061 &osd->eth.rx_multicast, 2062 &nsd->eth.rx_multicast); 2063 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), 2064 I40E_GLPRT_MPTCL(hw->port), 2065 pf->stat_offsets_loaded, 2066 &osd->eth.tx_multicast, 2067 &nsd->eth.tx_multicast); 2068 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), 2069 I40E_GLPRT_BPRCL(hw->port), 2070 pf->stat_offsets_loaded, 2071 &osd->eth.rx_broadcast, 2072 &nsd->eth.rx_broadcast); 2073 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), 2074 I40E_GLPRT_BPTCL(hw->port), 2075 pf->stat_offsets_loaded, 2076 &osd->eth.tx_broadcast, 2077 &nsd->eth.tx_broadcast); 2078 2079 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), 2080 pf->stat_offsets_loaded, 2081 &osd->tx_dropped_link_down, 2082 &nsd->tx_dropped_link_down); 2083 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), 2084 pf->stat_offsets_loaded, 2085 &osd->mac_local_faults, 2086 &nsd->mac_local_faults); 2087 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), 2088 pf->stat_offsets_loaded, 2089 &osd->mac_remote_faults, 2090 &nsd->mac_remote_faults); 2091 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), 2092 pf->stat_offsets_loaded, 2093 &osd->rx_length_errors, 2094 &nsd->rx_length_errors); 2095 2096 /* Flow control (LFC) stats */ 2097 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), 2098 pf->stat_offsets_loaded, 2099 &osd->link_xon_rx, &nsd->link_xon_rx); 2100 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), 2101 pf->stat_offsets_loaded, 2102 &osd->link_xon_tx, &nsd->link_xon_tx); 2103 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), 2104 pf->stat_offsets_loaded, 2105 &osd->link_xoff_rx, &nsd->link_xoff_rx); 2106 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), 2107 pf->stat_offsets_loaded, 2108 &osd->link_xoff_tx, &nsd->link_xoff_tx); 2109 2110 /* 2111 * For watchdog management we need to know if we have been paused 2112 * during the last interval, so capture that here. 2113 */ 2114 if (pf->stats.link_xoff_rx != prev_link_xoff_rx) 2115 vsi->shared->isc_pause_frames = 1; 2116 2117 /* Packet size stats rx */ 2118 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 2119 I40E_GLPRT_PRC64L(hw->port), 2120 pf->stat_offsets_loaded, 2121 &osd->rx_size_64, &nsd->rx_size_64); 2122 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), 2123 I40E_GLPRT_PRC127L(hw->port), 2124 pf->stat_offsets_loaded, 2125 &osd->rx_size_127, &nsd->rx_size_127); 2126 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), 2127 I40E_GLPRT_PRC255L(hw->port), 2128 pf->stat_offsets_loaded, 2129 &osd->rx_size_255, &nsd->rx_size_255); 2130 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), 2131 I40E_GLPRT_PRC511L(hw->port), 2132 pf->stat_offsets_loaded, 2133 &osd->rx_size_511, &nsd->rx_size_511); 2134 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), 2135 I40E_GLPRT_PRC1023L(hw->port), 2136 pf->stat_offsets_loaded, 2137 &osd->rx_size_1023, &nsd->rx_size_1023); 2138 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), 2139 I40E_GLPRT_PRC1522L(hw->port), 2140 pf->stat_offsets_loaded, 2141 &osd->rx_size_1522, &nsd->rx_size_1522); 2142 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), 2143 I40E_GLPRT_PRC9522L(hw->port), 2144 pf->stat_offsets_loaded, 2145 &osd->rx_size_big, &nsd->rx_size_big); 2146 2147 /* Packet size stats tx */ 2148 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), 2149 I40E_GLPRT_PTC64L(hw->port), 2150 pf->stat_offsets_loaded, 2151 &osd->tx_size_64, &nsd->tx_size_64); 2152 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), 2153 I40E_GLPRT_PTC127L(hw->port), 2154 pf->stat_offsets_loaded, 2155 &osd->tx_size_127, &nsd->tx_size_127); 2156 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), 2157 I40E_GLPRT_PTC255L(hw->port), 2158 pf->stat_offsets_loaded, 2159 &osd->tx_size_255, &nsd->tx_size_255); 2160 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), 2161 I40E_GLPRT_PTC511L(hw->port), 2162 pf->stat_offsets_loaded, 2163 &osd->tx_size_511, &nsd->tx_size_511); 2164 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), 2165 I40E_GLPRT_PTC1023L(hw->port), 2166 pf->stat_offsets_loaded, 2167 &osd->tx_size_1023, &nsd->tx_size_1023); 2168 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), 2169 I40E_GLPRT_PTC1522L(hw->port), 2170 pf->stat_offsets_loaded, 2171 &osd->tx_size_1522, &nsd->tx_size_1522); 2172 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), 2173 I40E_GLPRT_PTC9522L(hw->port), 2174 pf->stat_offsets_loaded, 2175 &osd->tx_size_big, &nsd->tx_size_big); 2176 2177 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), 2178 pf->stat_offsets_loaded, 2179 &osd->rx_undersize, &nsd->rx_undersize); 2180 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), 2181 pf->stat_offsets_loaded, 2182 &osd->rx_fragments, &nsd->rx_fragments); 2183 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), 2184 pf->stat_offsets_loaded, 2185 &osd->rx_oversize, &nsd->rx_oversize); 2186 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 2187 pf->stat_offsets_loaded, 2188 &osd->rx_jabber, &nsd->rx_jabber); 2189 /* EEE */ 2190 i40e_get_phy_lpi_status(hw, nsd); 2191 2192 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded, 2193 &osd->tx_lpi_count, &nsd->tx_lpi_count, 2194 &osd->rx_lpi_count, &nsd->rx_lpi_count); 2195 2196 pf->stat_offsets_loaded = true; 2197 /* End hw stats */ 2198 2199 /* Update vsi stats */ 2200 ixl_update_vsi_stats(vsi); 2201 2202 for (int i = 0; i < pf->num_vfs; i++) { 2203 vf = &pf->vfs[i]; 2204 if (vf->vf_flags & VF_FLAG_ENABLED) 2205 ixl_update_eth_stats(&pf->vfs[i].vsi); 2206 } 2207 } 2208 2209 /** 2210 * Update VSI-specific ethernet statistics counters. 2211 **/ 2212 void 2213 ixl_update_eth_stats(struct ixl_vsi *vsi) 2214 { 2215 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 2216 struct i40e_hw *hw = &pf->hw; 2217 struct i40e_eth_stats *es; 2218 struct i40e_eth_stats *oes; 2219 u16 stat_idx = vsi->info.stat_counter_idx; 2220 2221 es = &vsi->eth_stats; 2222 oes = &vsi->eth_stats_offsets; 2223 2224 /* Gather up the stats that the hw collects */ 2225 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), 2226 vsi->stat_offsets_loaded, 2227 &oes->tx_errors, &es->tx_errors); 2228 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 2229 vsi->stat_offsets_loaded, 2230 &oes->rx_discards, &es->rx_discards); 2231 2232 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), 2233 I40E_GLV_GORCL(stat_idx), 2234 vsi->stat_offsets_loaded, 2235 &oes->rx_bytes, &es->rx_bytes); 2236 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), 2237 I40E_GLV_UPRCL(stat_idx), 2238 vsi->stat_offsets_loaded, 2239 &oes->rx_unicast, &es->rx_unicast); 2240 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), 2241 I40E_GLV_MPRCL(stat_idx), 2242 vsi->stat_offsets_loaded, 2243 &oes->rx_multicast, &es->rx_multicast); 2244 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), 2245 I40E_GLV_BPRCL(stat_idx), 2246 vsi->stat_offsets_loaded, 2247 &oes->rx_broadcast, &es->rx_broadcast); 2248 2249 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), 2250 I40E_GLV_GOTCL(stat_idx), 2251 vsi->stat_offsets_loaded, 2252 &oes->tx_bytes, &es->tx_bytes); 2253 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), 2254 I40E_GLV_UPTCL(stat_idx), 2255 vsi->stat_offsets_loaded, 2256 &oes->tx_unicast, &es->tx_unicast); 2257 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), 2258 I40E_GLV_MPTCL(stat_idx), 2259 vsi->stat_offsets_loaded, 2260 &oes->tx_multicast, &es->tx_multicast); 2261 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), 2262 I40E_GLV_BPTCL(stat_idx), 2263 vsi->stat_offsets_loaded, 2264 &oes->tx_broadcast, &es->tx_broadcast); 2265 vsi->stat_offsets_loaded = true; 2266 } 2267 2268 void 2269 ixl_update_vsi_stats(struct ixl_vsi *vsi) 2270 { 2271 struct ixl_pf *pf; 2272 struct i40e_eth_stats *es; 2273 u64 tx_discards, csum_errs; 2274 2275 struct i40e_hw_port_stats *nsd; 2276 2277 pf = vsi->back; 2278 es = &vsi->eth_stats; 2279 nsd = &pf->stats; 2280 2281 ixl_update_eth_stats(vsi); 2282 2283 tx_discards = es->tx_discards + nsd->tx_dropped_link_down; 2284 2285 csum_errs = 0; 2286 for (int i = 0; i < vsi->num_rx_queues; i++) 2287 csum_errs += vsi->rx_queues[i].rxr.csum_errs; 2288 nsd->checksum_error = csum_errs; 2289 2290 /* Update ifnet stats */ 2291 IXL_SET_IPACKETS(vsi, es->rx_unicast + 2292 es->rx_multicast + 2293 es->rx_broadcast); 2294 IXL_SET_OPACKETS(vsi, es->tx_unicast + 2295 es->tx_multicast + 2296 es->tx_broadcast); 2297 IXL_SET_IBYTES(vsi, es->rx_bytes); 2298 IXL_SET_OBYTES(vsi, es->tx_bytes); 2299 IXL_SET_IMCASTS(vsi, es->rx_multicast); 2300 IXL_SET_OMCASTS(vsi, es->tx_multicast); 2301 2302 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + 2303 nsd->checksum_error + nsd->rx_length_errors + 2304 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize + 2305 nsd->rx_jabber); 2306 IXL_SET_OERRORS(vsi, es->tx_errors); 2307 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); 2308 IXL_SET_OQDROPS(vsi, tx_discards); 2309 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); 2310 IXL_SET_COLLISIONS(vsi, 0); 2311 } 2312 2313 /** 2314 * Reset all of the stats for the given pf 2315 **/ 2316 void 2317 ixl_pf_reset_stats(struct ixl_pf *pf) 2318 { 2319 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); 2320 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); 2321 pf->stat_offsets_loaded = false; 2322 } 2323 2324 /** 2325 * Resets all stats of the given vsi 2326 **/ 2327 void 2328 ixl_vsi_reset_stats(struct ixl_vsi *vsi) 2329 { 2330 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); 2331 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); 2332 vsi->stat_offsets_loaded = false; 2333 } 2334 2335 /** 2336 * Read and update a 48 bit stat from the hw 2337 * 2338 * Since the device stats are not reset at PFReset, they likely will not 2339 * be zeroed when the driver starts. We'll save the first values read 2340 * and use them as offsets to be subtracted from the raw values in order 2341 * to report stats that count from zero. 2342 **/ 2343 void 2344 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, 2345 bool offset_loaded, u64 *offset, u64 *stat) 2346 { 2347 u64 new_data; 2348 2349 new_data = rd64(hw, loreg); 2350 2351 if (!offset_loaded) 2352 *offset = new_data; 2353 if (new_data >= *offset) 2354 *stat = new_data - *offset; 2355 else 2356 *stat = (new_data + ((u64)1 << 48)) - *offset; 2357 *stat &= 0xFFFFFFFFFFFFULL; 2358 } 2359 2360 /** 2361 * Read and update a 32 bit stat from the hw 2362 **/ 2363 void 2364 ixl_stat_update32(struct i40e_hw *hw, u32 reg, 2365 bool offset_loaded, u64 *offset, u64 *stat) 2366 { 2367 u32 new_data; 2368 2369 new_data = rd32(hw, reg); 2370 if (!offset_loaded) 2371 *offset = new_data; 2372 if (new_data >= *offset) 2373 *stat = (u32)(new_data - *offset); 2374 else 2375 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); 2376 } 2377 2378 /** 2379 * Add subset of device sysctls safe to use in recovery mode 2380 */ 2381 void 2382 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf) 2383 { 2384 device_t dev = pf->dev; 2385 2386 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2387 struct sysctl_oid_list *ctx_list = 2388 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2389 2390 struct sysctl_oid *debug_node; 2391 struct sysctl_oid_list *debug_list; 2392 2393 SYSCTL_ADD_PROC(ctx, ctx_list, 2394 OID_AUTO, "fw_version", 2395 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2396 ixl_sysctl_show_fw, "A", "Firmware version"); 2397 2398 /* Add sysctls meant to print debug information, but don't list them 2399 * in "sysctl -a" output. */ 2400 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2401 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2402 "Debug Sysctls"); 2403 debug_list = SYSCTL_CHILDREN(debug_node); 2404 2405 SYSCTL_ADD_UINT(ctx, debug_list, 2406 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2407 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2408 2409 SYSCTL_ADD_UINT(ctx, debug_list, 2410 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2411 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2412 2413 SYSCTL_ADD_PROC(ctx, debug_list, 2414 OID_AUTO, "dump_debug_data", 2415 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2416 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2417 2418 SYSCTL_ADD_PROC(ctx, debug_list, 2419 OID_AUTO, "do_pf_reset", 2420 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2421 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2422 2423 SYSCTL_ADD_PROC(ctx, debug_list, 2424 OID_AUTO, "do_core_reset", 2425 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2426 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2427 2428 SYSCTL_ADD_PROC(ctx, debug_list, 2429 OID_AUTO, "do_global_reset", 2430 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2431 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2432 2433 SYSCTL_ADD_PROC(ctx, debug_list, 2434 OID_AUTO, "queue_interrupt_table", 2435 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2436 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2437 } 2438 2439 void 2440 ixl_add_device_sysctls(struct ixl_pf *pf) 2441 { 2442 device_t dev = pf->dev; 2443 struct i40e_hw *hw = &pf->hw; 2444 2445 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2446 struct sysctl_oid_list *ctx_list = 2447 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2448 2449 struct sysctl_oid *debug_node; 2450 struct sysctl_oid_list *debug_list; 2451 2452 struct sysctl_oid *fec_node; 2453 struct sysctl_oid_list *fec_list; 2454 struct sysctl_oid *eee_node; 2455 struct sysctl_oid_list *eee_list; 2456 2457 /* Set up sysctls */ 2458 SYSCTL_ADD_PROC(ctx, ctx_list, 2459 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2460 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 2461 2462 SYSCTL_ADD_PROC(ctx, ctx_list, 2463 OID_AUTO, "advertise_speed", 2464 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2465 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); 2466 2467 SYSCTL_ADD_PROC(ctx, ctx_list, 2468 OID_AUTO, "supported_speeds", 2469 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2470 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); 2471 2472 SYSCTL_ADD_PROC(ctx, ctx_list, 2473 OID_AUTO, "current_speed", 2474 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2475 ixl_sysctl_current_speed, "A", "Current Port Speed"); 2476 2477 SYSCTL_ADD_PROC(ctx, ctx_list, 2478 OID_AUTO, "fw_version", 2479 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2480 ixl_sysctl_show_fw, "A", "Firmware version"); 2481 2482 SYSCTL_ADD_PROC(ctx, ctx_list, 2483 OID_AUTO, "unallocated_queues", 2484 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2485 ixl_sysctl_unallocated_queues, "I", 2486 "Queues not allocated to a PF or VF"); 2487 2488 SYSCTL_ADD_PROC(ctx, ctx_list, 2489 OID_AUTO, "tx_itr", 2490 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2491 ixl_sysctl_pf_tx_itr, "I", 2492 "Immediately set TX ITR value for all queues"); 2493 2494 SYSCTL_ADD_PROC(ctx, ctx_list, 2495 OID_AUTO, "rx_itr", 2496 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2497 ixl_sysctl_pf_rx_itr, "I", 2498 "Immediately set RX ITR value for all queues"); 2499 2500 SYSCTL_ADD_INT(ctx, ctx_list, 2501 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 2502 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 2503 2504 SYSCTL_ADD_INT(ctx, ctx_list, 2505 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 2506 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 2507 2508 /* Add FEC sysctls for 25G adapters */ 2509 if (i40e_is_25G_device(hw->device_id)) { 2510 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2511 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2512 "FEC Sysctls"); 2513 fec_list = SYSCTL_CHILDREN(fec_node); 2514 2515 SYSCTL_ADD_PROC(ctx, fec_list, 2516 OID_AUTO, "fc_ability", 2517 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2518 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 2519 2520 SYSCTL_ADD_PROC(ctx, fec_list, 2521 OID_AUTO, "rs_ability", 2522 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2523 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 2524 2525 SYSCTL_ADD_PROC(ctx, fec_list, 2526 OID_AUTO, "fc_requested", 2527 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2528 ixl_sysctl_fec_fc_request, "I", 2529 "FC FEC mode requested on link"); 2530 2531 SYSCTL_ADD_PROC(ctx, fec_list, 2532 OID_AUTO, "rs_requested", 2533 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2534 ixl_sysctl_fec_rs_request, "I", 2535 "RS FEC mode requested on link"); 2536 2537 SYSCTL_ADD_PROC(ctx, fec_list, 2538 OID_AUTO, "auto_fec_enabled", 2539 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2540 ixl_sysctl_fec_auto_enable, "I", 2541 "Let FW decide FEC ability/request modes"); 2542 } 2543 2544 SYSCTL_ADD_PROC(ctx, ctx_list, 2545 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2546 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 2547 2548 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2549 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2550 "Energy Efficient Ethernet (EEE) Sysctls"); 2551 eee_list = SYSCTL_CHILDREN(eee_node); 2552 2553 SYSCTL_ADD_PROC(ctx, eee_list, 2554 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2555 pf, 0, ixl_sysctl_eee_enable, "I", 2556 "Enable Energy Efficient Ethernet (EEE)"); 2557 2558 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", 2559 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0, 2560 "TX LPI status"); 2561 2562 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", 2563 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0, 2564 "RX LPI status"); 2565 2566 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", 2567 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count, 2568 "TX LPI count"); 2569 2570 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", 2571 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count, 2572 "RX LPI count"); 2573 2574 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, 2575 "link_active_on_if_down", 2576 CTLTYPE_INT | CTLFLAG_RWTUN, 2577 pf, 0, ixl_sysctl_set_link_active, "I", 2578 IXL_SYSCTL_HELP_SET_LINK_ACTIVE); 2579 2580 /* Add sysctls meant to print debug information, but don't list them 2581 * in "sysctl -a" output. */ 2582 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2583 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2584 "Debug Sysctls"); 2585 debug_list = SYSCTL_CHILDREN(debug_node); 2586 2587 SYSCTL_ADD_UINT(ctx, debug_list, 2588 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2589 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2590 2591 SYSCTL_ADD_UINT(ctx, debug_list, 2592 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2593 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2594 2595 SYSCTL_ADD_PROC(ctx, debug_list, 2596 OID_AUTO, "link_status", 2597 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2598 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); 2599 2600 SYSCTL_ADD_PROC(ctx, debug_list, 2601 OID_AUTO, "phy_abilities_init", 2602 CTLTYPE_STRING | CTLFLAG_RD, 2603 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities"); 2604 2605 SYSCTL_ADD_PROC(ctx, debug_list, 2606 OID_AUTO, "phy_abilities", 2607 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2608 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); 2609 2610 SYSCTL_ADD_PROC(ctx, debug_list, 2611 OID_AUTO, "filter_list", 2612 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2613 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); 2614 2615 SYSCTL_ADD_PROC(ctx, debug_list, 2616 OID_AUTO, "hw_res_alloc", 2617 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2618 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); 2619 2620 SYSCTL_ADD_PROC(ctx, debug_list, 2621 OID_AUTO, "switch_config", 2622 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2623 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); 2624 2625 SYSCTL_ADD_PROC(ctx, debug_list, 2626 OID_AUTO, "switch_vlans", 2627 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2628 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration"); 2629 2630 SYSCTL_ADD_PROC(ctx, debug_list, 2631 OID_AUTO, "rss_key", 2632 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2633 pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); 2634 2635 SYSCTL_ADD_PROC(ctx, debug_list, 2636 OID_AUTO, "rss_lut", 2637 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2638 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); 2639 2640 SYSCTL_ADD_PROC(ctx, debug_list, 2641 OID_AUTO, "rss_hena", 2642 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2643 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); 2644 2645 SYSCTL_ADD_PROC(ctx, debug_list, 2646 OID_AUTO, "disable_fw_link_management", 2647 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2648 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); 2649 2650 SYSCTL_ADD_PROC(ctx, debug_list, 2651 OID_AUTO, "dump_debug_data", 2652 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2653 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2654 2655 SYSCTL_ADD_PROC(ctx, debug_list, 2656 OID_AUTO, "do_pf_reset", 2657 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2658 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2659 2660 SYSCTL_ADD_PROC(ctx, debug_list, 2661 OID_AUTO, "do_core_reset", 2662 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2663 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2664 2665 SYSCTL_ADD_PROC(ctx, debug_list, 2666 OID_AUTO, "do_global_reset", 2667 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2668 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2669 2670 SYSCTL_ADD_PROC(ctx, debug_list, 2671 OID_AUTO, "queue_interrupt_table", 2672 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2673 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2674 2675 SYSCTL_ADD_PROC(ctx, debug_list, 2676 OID_AUTO, "phy_statistics", CTLTYPE_STRING | CTLFLAG_RD, 2677 pf, 0, ixl_sysctl_phy_statistics, "A", "PHY Statistics"); 2678 2679 if (pf->has_i2c) { 2680 SYSCTL_ADD_PROC(ctx, debug_list, 2681 OID_AUTO, "read_i2c_byte", 2682 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2683 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); 2684 2685 SYSCTL_ADD_PROC(ctx, debug_list, 2686 OID_AUTO, "write_i2c_byte", 2687 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2688 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); 2689 2690 SYSCTL_ADD_PROC(ctx, debug_list, 2691 OID_AUTO, "read_i2c_diag_data", 2692 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2693 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); 2694 } 2695 } 2696 2697 /* 2698 * Primarily for finding out how many queues can be assigned to VFs, 2699 * at runtime. 2700 */ 2701 static int 2702 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) 2703 { 2704 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2705 int queues; 2706 2707 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); 2708 2709 return sysctl_handle_int(oidp, NULL, queues, req); 2710 } 2711 2712 static const char * 2713 ixl_link_speed_string(enum i40e_aq_link_speed link_speed) 2714 { 2715 const char * link_speed_str[] = { 2716 "Unknown", 2717 "100 Mbps", 2718 "1 Gbps", 2719 "10 Gbps", 2720 "40 Gbps", 2721 "20 Gbps", 2722 "25 Gbps", 2723 "2.5 Gbps", 2724 "5 Gbps" 2725 }; 2726 int index; 2727 2728 switch (link_speed) { 2729 case I40E_LINK_SPEED_100MB: 2730 index = 1; 2731 break; 2732 case I40E_LINK_SPEED_1GB: 2733 index = 2; 2734 break; 2735 case I40E_LINK_SPEED_10GB: 2736 index = 3; 2737 break; 2738 case I40E_LINK_SPEED_40GB: 2739 index = 4; 2740 break; 2741 case I40E_LINK_SPEED_20GB: 2742 index = 5; 2743 break; 2744 case I40E_LINK_SPEED_25GB: 2745 index = 6; 2746 break; 2747 case I40E_LINK_SPEED_2_5GB: 2748 index = 7; 2749 break; 2750 case I40E_LINK_SPEED_5GB: 2751 index = 8; 2752 break; 2753 case I40E_LINK_SPEED_UNKNOWN: 2754 default: 2755 index = 0; 2756 break; 2757 } 2758 2759 return (link_speed_str[index]); 2760 } 2761 2762 int 2763 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2764 { 2765 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2766 struct i40e_hw *hw = &pf->hw; 2767 int error = 0; 2768 2769 ixl_update_link_status(pf); 2770 2771 error = sysctl_handle_string(oidp, 2772 __DECONST(void *, 2773 ixl_link_speed_string(hw->phy.link_info.link_speed)), 2774 8, req); 2775 2776 return (error); 2777 } 2778 2779 /* 2780 * Converts 8-bit speeds value to and from sysctl flags and 2781 * Admin Queue flags. 2782 */ 2783 static u8 2784 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) 2785 { 2786 #define SPEED_MAP_SIZE 8 2787 static u16 speedmap[SPEED_MAP_SIZE] = { 2788 (I40E_LINK_SPEED_100MB | (0x1 << 8)), 2789 (I40E_LINK_SPEED_1GB | (0x2 << 8)), 2790 (I40E_LINK_SPEED_10GB | (0x4 << 8)), 2791 (I40E_LINK_SPEED_20GB | (0x8 << 8)), 2792 (I40E_LINK_SPEED_25GB | (0x10 << 8)), 2793 (I40E_LINK_SPEED_40GB | (0x20 << 8)), 2794 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)), 2795 (I40E_LINK_SPEED_5GB | (0x80 << 8)), 2796 }; 2797 u8 retval = 0; 2798 2799 for (int i = 0; i < SPEED_MAP_SIZE; i++) { 2800 if (to_aq) 2801 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; 2802 else 2803 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0; 2804 } 2805 2806 return (retval); 2807 } 2808 2809 int 2810 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) 2811 { 2812 struct i40e_hw *hw = &pf->hw; 2813 device_t dev = pf->dev; 2814 struct i40e_aq_get_phy_abilities_resp abilities; 2815 struct i40e_aq_set_phy_config config; 2816 enum i40e_status_code aq_error = 0; 2817 2818 /* Get current capability information */ 2819 aq_error = i40e_aq_get_phy_capabilities(hw, 2820 FALSE, FALSE, &abilities, NULL); 2821 if (aq_error) { 2822 device_printf(dev, 2823 "%s: Error getting phy capabilities %d," 2824 " aq error: %d\n", __func__, aq_error, 2825 hw->aq.asq_last_status); 2826 return (EIO); 2827 } 2828 2829 /* Prepare new config */ 2830 bzero(&config, sizeof(config)); 2831 if (from_aq) 2832 config.link_speed = speeds; 2833 else 2834 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); 2835 config.phy_type = abilities.phy_type; 2836 config.phy_type_ext = abilities.phy_type_ext; 2837 config.abilities = abilities.abilities 2838 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 2839 config.eee_capability = abilities.eee_capability; 2840 config.eeer = abilities.eeer_val; 2841 config.low_power_ctrl = abilities.d3_lpan; 2842 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 2843 & I40E_AQ_PHY_FEC_CONFIG_MASK; 2844 2845 /* Do aq command & restart link */ 2846 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 2847 if (aq_error) { 2848 device_printf(dev, 2849 "%s: Error setting new phy config %d," 2850 " aq error: %d\n", __func__, aq_error, 2851 hw->aq.asq_last_status); 2852 return (EIO); 2853 } 2854 2855 return (0); 2856 } 2857 2858 /* 2859 ** Supported link speeds 2860 ** Flags: 2861 ** 0x1 - 100 Mb 2862 ** 0x2 - 1G 2863 ** 0x4 - 10G 2864 ** 0x8 - 20G 2865 ** 0x10 - 25G 2866 ** 0x20 - 40G 2867 ** 0x40 - 2.5G 2868 ** 0x80 - 5G 2869 */ 2870 static int 2871 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) 2872 { 2873 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2874 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 2875 2876 return sysctl_handle_int(oidp, NULL, supported, req); 2877 } 2878 2879 /* 2880 ** Control link advertise speed: 2881 ** Flags: 2882 ** 0x1 - advertise 100 Mb 2883 ** 0x2 - advertise 1G 2884 ** 0x4 - advertise 10G 2885 ** 0x8 - advertise 20G 2886 ** 0x10 - advertise 25G 2887 ** 0x20 - advertise 40G 2888 ** 0x40 - advertise 2.5G 2889 ** 0x80 - advertise 5G 2890 ** 2891 ** Set to 0 to disable link 2892 */ 2893 int 2894 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) 2895 { 2896 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2897 device_t dev = pf->dev; 2898 u8 converted_speeds; 2899 int requested_ls = 0; 2900 int error = 0; 2901 2902 /* Read in new mode */ 2903 requested_ls = pf->advertised_speed; 2904 error = sysctl_handle_int(oidp, &requested_ls, 0, req); 2905 if ((error) || (req->newptr == NULL)) 2906 return (error); 2907 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 2908 device_printf(dev, "Interface is currently in FW recovery mode. " 2909 "Setting advertise speed not supported\n"); 2910 return (EINVAL); 2911 } 2912 2913 /* Error out if bits outside of possible flag range are set */ 2914 if ((requested_ls & ~((u8)0xFF)) != 0) { 2915 device_printf(dev, "Input advertised speed out of range; " 2916 "valid flags are: 0x%02x\n", 2917 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2918 return (EINVAL); 2919 } 2920 2921 /* Check if adapter supports input value */ 2922 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); 2923 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { 2924 device_printf(dev, "Invalid advertised speed; " 2925 "valid flags are: 0x%02x\n", 2926 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2927 return (EINVAL); 2928 } 2929 2930 error = ixl_set_advertised_speeds(pf, requested_ls, false); 2931 if (error) 2932 return (error); 2933 2934 pf->advertised_speed = requested_ls; 2935 ixl_update_link_status(pf); 2936 return (0); 2937 } 2938 2939 /* 2940 * Input: bitmap of enum i40e_aq_link_speed 2941 */ 2942 u64 2943 ixl_max_aq_speed_to_value(u8 link_speeds) 2944 { 2945 if (link_speeds & I40E_LINK_SPEED_40GB) 2946 return IF_Gbps(40); 2947 if (link_speeds & I40E_LINK_SPEED_25GB) 2948 return IF_Gbps(25); 2949 if (link_speeds & I40E_LINK_SPEED_20GB) 2950 return IF_Gbps(20); 2951 if (link_speeds & I40E_LINK_SPEED_10GB) 2952 return IF_Gbps(10); 2953 if (link_speeds & I40E_LINK_SPEED_5GB) 2954 return IF_Gbps(5); 2955 if (link_speeds & I40E_LINK_SPEED_2_5GB) 2956 return IF_Mbps(2500); 2957 if (link_speeds & I40E_LINK_SPEED_1GB) 2958 return IF_Gbps(1); 2959 if (link_speeds & I40E_LINK_SPEED_100MB) 2960 return IF_Mbps(100); 2961 else 2962 /* Minimum supported link speed */ 2963 return IF_Mbps(100); 2964 } 2965 2966 /* 2967 ** Get the width and transaction speed of 2968 ** the bus this adapter is plugged into. 2969 */ 2970 void 2971 ixl_get_bus_info(struct ixl_pf *pf) 2972 { 2973 struct i40e_hw *hw = &pf->hw; 2974 device_t dev = pf->dev; 2975 u16 link; 2976 u32 offset, num_ports; 2977 u64 max_speed; 2978 2979 /* Some devices don't use PCIE */ 2980 if (hw->mac.type == I40E_MAC_X722) 2981 return; 2982 2983 /* Read PCI Express Capabilities Link Status Register */ 2984 pci_find_cap(dev, PCIY_EXPRESS, &offset); 2985 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2986 2987 /* Fill out hw struct with PCIE info */ 2988 i40e_set_pci_config_data(hw, link); 2989 2990 /* Use info to print out bandwidth messages */ 2991 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 2992 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 2993 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 2994 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 2995 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 2996 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 2997 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 2998 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 2999 ("Unknown")); 3000 3001 /* 3002 * If adapter is in slot with maximum supported speed, 3003 * no warning message needs to be printed out. 3004 */ 3005 if (hw->bus.speed >= i40e_bus_speed_8000 3006 && hw->bus.width >= i40e_bus_width_pcie_x8) 3007 return; 3008 3009 num_ports = bitcount32(hw->func_caps.valid_functions); 3010 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; 3011 3012 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { 3013 device_printf(dev, "PCI-Express bandwidth available" 3014 " for this device may be insufficient for" 3015 " optimal performance.\n"); 3016 device_printf(dev, "Please move the device to a different" 3017 " PCI-e link with more lanes and/or higher" 3018 " transfer rate.\n"); 3019 } 3020 } 3021 3022 static int 3023 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 3024 { 3025 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3026 struct i40e_hw *hw = &pf->hw; 3027 struct sbuf *sbuf; 3028 3029 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3030 ixl_nvm_version_str(hw, sbuf); 3031 sbuf_finish(sbuf); 3032 sbuf_delete(sbuf); 3033 3034 return (0); 3035 } 3036 3037 void 3038 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) 3039 { 3040 u8 nvma_ptr = nvma->config & 0xFF; 3041 u8 nvma_flags = (nvma->config & 0xF00) >> 8; 3042 const char * cmd_str; 3043 3044 switch (nvma->command) { 3045 case I40E_NVM_READ: 3046 if (nvma_ptr == 0xF && nvma_flags == 0xF && 3047 nvma->offset == 0 && nvma->data_size == 1) { 3048 device_printf(dev, "NVMUPD: Get Driver Status Command\n"); 3049 return; 3050 } 3051 cmd_str = "READ "; 3052 break; 3053 case I40E_NVM_WRITE: 3054 cmd_str = "WRITE"; 3055 break; 3056 default: 3057 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command); 3058 return; 3059 } 3060 device_printf(dev, 3061 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n", 3062 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size); 3063 } 3064 3065 int 3066 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) 3067 { 3068 struct i40e_hw *hw = &pf->hw; 3069 struct i40e_nvm_access *nvma; 3070 device_t dev = pf->dev; 3071 enum i40e_status_code status = 0; 3072 size_t nvma_size, ifd_len, exp_len; 3073 int err, perrno; 3074 3075 DEBUGFUNC("ixl_handle_nvmupd_cmd"); 3076 3077 /* Sanity checks */ 3078 nvma_size = sizeof(struct i40e_nvm_access); 3079 ifd_len = ifd->ifd_len; 3080 3081 if (ifd_len < nvma_size || 3082 ifd->ifd_data == NULL) { 3083 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", 3084 __func__); 3085 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", 3086 __func__, ifd_len, nvma_size); 3087 device_printf(dev, "%s: data pointer: %p\n", __func__, 3088 ifd->ifd_data); 3089 return (EINVAL); 3090 } 3091 3092 nvma = malloc(ifd_len, M_IXL, M_WAITOK); 3093 err = copyin(ifd->ifd_data, nvma, ifd_len); 3094 if (err) { 3095 device_printf(dev, "%s: Cannot get request from user space\n", 3096 __func__); 3097 free(nvma, M_IXL); 3098 return (err); 3099 } 3100 3101 if (pf->dbg_mask & IXL_DBG_NVMUPD) 3102 ixl_print_nvm_cmd(dev, nvma); 3103 3104 if (IXL_PF_IS_RESETTING(pf)) { 3105 int count = 0; 3106 while (count++ < 100) { 3107 i40e_msec_delay(100); 3108 if (!(IXL_PF_IS_RESETTING(pf))) 3109 break; 3110 } 3111 } 3112 3113 if (IXL_PF_IS_RESETTING(pf)) { 3114 device_printf(dev, 3115 "%s: timeout waiting for EMP reset to finish\n", 3116 __func__); 3117 free(nvma, M_IXL); 3118 return (-EBUSY); 3119 } 3120 3121 if (nvma->data_size < 1 || nvma->data_size > 4096) { 3122 device_printf(dev, 3123 "%s: invalid request, data size not in supported range\n", 3124 __func__); 3125 free(nvma, M_IXL); 3126 return (EINVAL); 3127 } 3128 3129 /* 3130 * Older versions of the NVM update tool don't set ifd_len to the size 3131 * of the entire buffer passed to the ioctl. Check the data_size field 3132 * in the contained i40e_nvm_access struct and ensure everything is 3133 * copied in from userspace. 3134 */ 3135 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ 3136 3137 if (ifd_len < exp_len) { 3138 ifd_len = exp_len; 3139 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK); 3140 err = copyin(ifd->ifd_data, nvma, ifd_len); 3141 if (err) { 3142 device_printf(dev, "%s: Cannot get request from user space\n", 3143 __func__); 3144 free(nvma, M_IXL); 3145 return (err); 3146 } 3147 } 3148 3149 // TODO: Might need a different lock here 3150 // IXL_PF_LOCK(pf); 3151 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); 3152 // IXL_PF_UNLOCK(pf); 3153 3154 err = copyout(nvma, ifd->ifd_data, ifd_len); 3155 free(nvma, M_IXL); 3156 if (err) { 3157 device_printf(dev, "%s: Cannot return data to user space\n", 3158 __func__); 3159 return (err); 3160 } 3161 3162 /* Let the nvmupdate report errors, show them only when debug is enabled */ 3163 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) 3164 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", 3165 i40e_stat_str(hw, status), perrno); 3166 3167 /* 3168 * -EPERM is actually ERESTART, which the kernel interprets as it needing 3169 * to run this ioctl again. So use -EACCES for -EPERM instead. 3170 */ 3171 if (perrno == -EPERM) 3172 return (-EACCES); 3173 else 3174 return (perrno); 3175 } 3176 3177 int 3178 ixl_find_i2c_interface(struct ixl_pf *pf) 3179 { 3180 struct i40e_hw *hw = &pf->hw; 3181 bool i2c_en, port_matched; 3182 u32 reg; 3183 3184 for (int i = 0; i < 4; i++) { 3185 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); 3186 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); 3187 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) 3188 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) 3189 & BIT(hw->port); 3190 if (i2c_en && port_matched) 3191 return (i); 3192 } 3193 3194 return (-1); 3195 } 3196 3197 void 3198 ixl_set_link(struct ixl_pf *pf, bool enable) 3199 { 3200 struct i40e_hw *hw = &pf->hw; 3201 device_t dev = pf->dev; 3202 struct i40e_aq_get_phy_abilities_resp abilities; 3203 struct i40e_aq_set_phy_config config; 3204 enum i40e_status_code aq_error = 0; 3205 u32 phy_type, phy_type_ext; 3206 3207 /* Get initial capability information */ 3208 aq_error = i40e_aq_get_phy_capabilities(hw, 3209 FALSE, TRUE, &abilities, NULL); 3210 if (aq_error) { 3211 device_printf(dev, 3212 "%s: Error getting phy capabilities %d," 3213 " aq error: %d\n", __func__, aq_error, 3214 hw->aq.asq_last_status); 3215 return; 3216 } 3217 3218 phy_type = abilities.phy_type; 3219 phy_type_ext = abilities.phy_type_ext; 3220 3221 /* Get current capability information */ 3222 aq_error = i40e_aq_get_phy_capabilities(hw, 3223 FALSE, FALSE, &abilities, NULL); 3224 if (aq_error) { 3225 device_printf(dev, 3226 "%s: Error getting phy capabilities %d," 3227 " aq error: %d\n", __func__, aq_error, 3228 hw->aq.asq_last_status); 3229 return; 3230 } 3231 3232 /* Prepare new config */ 3233 memset(&config, 0, sizeof(config)); 3234 config.link_speed = abilities.link_speed; 3235 config.abilities = abilities.abilities; 3236 config.eee_capability = abilities.eee_capability; 3237 config.eeer = abilities.eeer_val; 3238 config.low_power_ctrl = abilities.d3_lpan; 3239 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 3240 & I40E_AQ_PHY_FEC_CONFIG_MASK; 3241 config.phy_type = 0; 3242 config.phy_type_ext = 0; 3243 3244 config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX | 3245 I40E_AQ_PHY_FLAG_PAUSE_RX); 3246 3247 switch (pf->fc) { 3248 case I40E_FC_FULL: 3249 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX | 3250 I40E_AQ_PHY_FLAG_PAUSE_RX; 3251 break; 3252 case I40E_FC_RX_PAUSE: 3253 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX; 3254 break; 3255 case I40E_FC_TX_PAUSE: 3256 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX; 3257 break; 3258 default: 3259 break; 3260 } 3261 3262 if (enable) { 3263 config.phy_type = phy_type; 3264 config.phy_type_ext = phy_type_ext; 3265 3266 } 3267 3268 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 3269 if (aq_error) { 3270 device_printf(dev, 3271 "%s: Error setting new phy config %d," 3272 " aq error: %d\n", __func__, aq_error, 3273 hw->aq.asq_last_status); 3274 return; 3275 } 3276 3277 aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL); 3278 if (aq_error) { 3279 device_printf(dev, 3280 "%s: Error set link config %d," 3281 " aq error: %d\n", __func__, aq_error, 3282 hw->aq.asq_last_status); 3283 return; 3284 } 3285 } 3286 3287 static char * 3288 ixl_phy_type_string(u32 bit_pos, bool ext) 3289 { 3290 static char * phy_types_str[32] = { 3291 "SGMII", 3292 "1000BASE-KX", 3293 "10GBASE-KX4", 3294 "10GBASE-KR", 3295 "40GBASE-KR4", 3296 "XAUI", 3297 "XFI", 3298 "SFI", 3299 "XLAUI", 3300 "XLPPI", 3301 "40GBASE-CR4", 3302 "10GBASE-CR1", 3303 "SFP+ Active DA", 3304 "QSFP+ Active DA", 3305 "Reserved (14)", 3306 "Reserved (15)", 3307 "Reserved (16)", 3308 "100BASE-TX", 3309 "1000BASE-T", 3310 "10GBASE-T", 3311 "10GBASE-SR", 3312 "10GBASE-LR", 3313 "10GBASE-SFP+Cu", 3314 "10GBASE-CR1", 3315 "40GBASE-CR4", 3316 "40GBASE-SR4", 3317 "40GBASE-LR4", 3318 "1000BASE-SX", 3319 "1000BASE-LX", 3320 "1000BASE-T Optical", 3321 "20GBASE-KR2", 3322 "Reserved (31)" 3323 }; 3324 static char * ext_phy_types_str[8] = { 3325 "25GBASE-KR", 3326 "25GBASE-CR", 3327 "25GBASE-SR", 3328 "25GBASE-LR", 3329 "25GBASE-AOC", 3330 "25GBASE-ACC", 3331 "2.5GBASE-T", 3332 "5GBASE-T" 3333 }; 3334 3335 if (ext && bit_pos > 7) return "Invalid_Ext"; 3336 if (bit_pos > 31) return "Invalid"; 3337 3338 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; 3339 } 3340 3341 /* TODO: ERJ: I don't this is necessary anymore. */ 3342 int 3343 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) 3344 { 3345 device_t dev = pf->dev; 3346 struct i40e_hw *hw = &pf->hw; 3347 struct i40e_aq_desc desc; 3348 enum i40e_status_code status; 3349 3350 struct i40e_aqc_get_link_status *aq_link_status = 3351 (struct i40e_aqc_get_link_status *)&desc.params.raw; 3352 3353 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 3354 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); 3355 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 3356 if (status) { 3357 device_printf(dev, 3358 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", 3359 __func__, i40e_stat_str(hw, status), 3360 i40e_aq_str(hw, hw->aq.asq_last_status)); 3361 return (EIO); 3362 } 3363 3364 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); 3365 return (0); 3366 } 3367 3368 static char * 3369 ixl_phy_type_string_ls(u8 val) 3370 { 3371 if (val >= 0x1F) 3372 return ixl_phy_type_string(val - 0x1F, true); 3373 else 3374 return ixl_phy_type_string(val, false); 3375 } 3376 3377 static int 3378 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) 3379 { 3380 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3381 device_t dev = pf->dev; 3382 struct sbuf *buf; 3383 int error = 0; 3384 3385 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3386 if (!buf) { 3387 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3388 return (ENOMEM); 3389 } 3390 3391 struct i40e_aqc_get_link_status link_status; 3392 error = ixl_aq_get_link_status(pf, &link_status); 3393 if (error) { 3394 sbuf_delete(buf); 3395 return (error); 3396 } 3397 3398 sbuf_printf(buf, "\n" 3399 "PHY Type : 0x%02x<%s>\n" 3400 "Speed : 0x%02x\n" 3401 "Link info: 0x%02x\n" 3402 "AN info : 0x%02x\n" 3403 "Ext info : 0x%02x\n" 3404 "Loopback : 0x%02x\n" 3405 "Max Frame: %d\n" 3406 "Config : 0x%02x\n" 3407 "Power : 0x%02x", 3408 link_status.phy_type, 3409 ixl_phy_type_string_ls(link_status.phy_type), 3410 link_status.link_speed, 3411 link_status.link_info, 3412 link_status.an_info, 3413 link_status.ext_info, 3414 link_status.loopback, 3415 link_status.max_frame_size, 3416 link_status.config, 3417 link_status.power_desc); 3418 3419 error = sbuf_finish(buf); 3420 if (error) 3421 device_printf(dev, "Error finishing sbuf: %d\n", error); 3422 3423 sbuf_delete(buf); 3424 return (error); 3425 } 3426 3427 static int 3428 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 3429 { 3430 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3431 struct i40e_hw *hw = &pf->hw; 3432 device_t dev = pf->dev; 3433 enum i40e_status_code status; 3434 struct i40e_aq_get_phy_abilities_resp abilities; 3435 struct sbuf *buf; 3436 int error = 0; 3437 3438 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3439 if (!buf) { 3440 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3441 return (ENOMEM); 3442 } 3443 3444 status = i40e_aq_get_phy_capabilities(hw, 3445 FALSE, arg2 != 0, &abilities, NULL); 3446 if (status) { 3447 device_printf(dev, 3448 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 3449 __func__, i40e_stat_str(hw, status), 3450 i40e_aq_str(hw, hw->aq.asq_last_status)); 3451 sbuf_delete(buf); 3452 return (EIO); 3453 } 3454 3455 sbuf_printf(buf, "\n" 3456 "PHY Type : %08x", 3457 abilities.phy_type); 3458 3459 if (abilities.phy_type != 0) { 3460 sbuf_printf(buf, "<"); 3461 for (int i = 0; i < 32; i++) 3462 if ((1 << i) & abilities.phy_type) 3463 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); 3464 sbuf_printf(buf, ">"); 3465 } 3466 3467 sbuf_printf(buf, "\nPHY Ext : %02x", 3468 abilities.phy_type_ext); 3469 3470 if (abilities.phy_type_ext != 0) { 3471 sbuf_printf(buf, "<"); 3472 for (int i = 0; i < 4; i++) 3473 if ((1 << i) & abilities.phy_type_ext) 3474 sbuf_printf(buf, "%s,", 3475 ixl_phy_type_string(i, true)); 3476 sbuf_printf(buf, ">"); 3477 } 3478 3479 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 3480 if (abilities.link_speed != 0) { 3481 u8 link_speed; 3482 sbuf_printf(buf, " <"); 3483 for (int i = 0; i < 8; i++) { 3484 link_speed = (1 << i) & abilities.link_speed; 3485 if (link_speed) 3486 sbuf_printf(buf, "%s, ", 3487 ixl_link_speed_string(link_speed)); 3488 } 3489 sbuf_printf(buf, ">"); 3490 } 3491 3492 sbuf_printf(buf, "\n" 3493 "Abilities: %02x\n" 3494 "EEE cap : %04x\n" 3495 "EEER reg : %08x\n" 3496 "D3 Lpan : %02x\n" 3497 "ID : %02x %02x %02x %02x\n" 3498 "ModType : %02x %02x %02x\n" 3499 "ModType E: %01x\n" 3500 "FEC Cfg : %02x\n" 3501 "Ext CC : %02x", 3502 abilities.abilities, abilities.eee_capability, 3503 abilities.eeer_val, abilities.d3_lpan, 3504 abilities.phy_id[0], abilities.phy_id[1], 3505 abilities.phy_id[2], abilities.phy_id[3], 3506 abilities.module_type[0], abilities.module_type[1], 3507 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 3508 abilities.fec_cfg_curr_mod_ext_info & 0x1F, 3509 abilities.ext_comp_code); 3510 3511 error = sbuf_finish(buf); 3512 if (error) 3513 device_printf(dev, "Error finishing sbuf: %d\n", error); 3514 3515 sbuf_delete(buf); 3516 return (error); 3517 } 3518 3519 static int 3520 ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS) 3521 { 3522 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3523 struct i40e_hw *hw = &pf->hw; 3524 device_t dev = pf->dev; 3525 struct sbuf *buf; 3526 int error = 0; 3527 3528 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3529 if (buf == NULL) { 3530 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3531 return (ENOMEM); 3532 } 3533 3534 if (hw->mac.type == I40E_MAC_X722) { 3535 sbuf_printf(buf, "\n" 3536 "PCS Link Control Register: unavailable\n" 3537 "PCS Link Status 1: unavailable\n" 3538 "PCS Link Status 2: unavailable\n" 3539 "XGMII FIFO Status: unavailable\n" 3540 "Auto-Negotiation (AN) Status: unavailable\n" 3541 "KR PCS Status: unavailable\n" 3542 "KR FEC Status 1 – FEC Correctable Blocks Counter: unavailable\n" 3543 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: unavailable" 3544 ); 3545 } else { 3546 sbuf_printf(buf, "\n" 3547 "PCS Link Control Register: %#010X\n" 3548 "PCS Link Status 1: %#010X\n" 3549 "PCS Link Status 2: %#010X\n" 3550 "XGMII FIFO Status: %#010X\n" 3551 "Auto-Negotiation (AN) Status: %#010X\n" 3552 "KR PCS Status: %#010X\n" 3553 "KR FEC Status 1 – FEC Correctable Blocks Counter: %#010X\n" 3554 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: %#010X", 3555 rd32(hw, I40E_PRTMAC_PCS_LINK_CTRL), 3556 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS1(0)), 3557 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS2), 3558 rd32(hw, I40E_PRTMAC_PCS_XGMII_FIFO_STATUS), 3559 rd32(hw, I40E_PRTMAC_PCS_AN_LP_STATUS), 3560 rd32(hw, I40E_PRTMAC_PCS_KR_STATUS), 3561 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS1), 3562 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS2) 3563 ); 3564 } 3565 3566 error = sbuf_finish(buf); 3567 if (error) 3568 device_printf(dev, "Error finishing sbuf: %d\n", error); 3569 3570 sbuf_delete(buf); 3571 return (error); 3572 } 3573 3574 static int 3575 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 3576 { 3577 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3578 struct ixl_vsi *vsi = &pf->vsi; 3579 struct ixl_mac_filter *f; 3580 device_t dev = pf->dev; 3581 int error = 0, ftl_len = 0, ftl_counter = 0; 3582 3583 struct sbuf *buf; 3584 3585 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3586 if (!buf) { 3587 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3588 return (ENOMEM); 3589 } 3590 3591 sbuf_printf(buf, "\n"); 3592 3593 /* Print MAC filters */ 3594 sbuf_printf(buf, "PF Filters:\n"); 3595 LIST_FOREACH(f, &vsi->ftl, ftle) 3596 ftl_len++; 3597 3598 if (ftl_len < 1) 3599 sbuf_printf(buf, "(none)\n"); 3600 else { 3601 LIST_FOREACH(f, &vsi->ftl, ftle) { 3602 sbuf_printf(buf, 3603 MAC_FORMAT ", vlan %4d, flags %#06x", 3604 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3605 /* don't print '\n' for last entry */ 3606 if (++ftl_counter != ftl_len) 3607 sbuf_printf(buf, "\n"); 3608 } 3609 } 3610 3611 #ifdef PCI_IOV 3612 /* TODO: Give each VF its own filter list sysctl */ 3613 struct ixl_vf *vf; 3614 if (pf->num_vfs > 0) { 3615 sbuf_printf(buf, "\n\n"); 3616 for (int i = 0; i < pf->num_vfs; i++) { 3617 vf = &pf->vfs[i]; 3618 if (!(vf->vf_flags & VF_FLAG_ENABLED)) 3619 continue; 3620 3621 vsi = &vf->vsi; 3622 ftl_len = 0, ftl_counter = 0; 3623 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num); 3624 LIST_FOREACH(f, &vsi->ftl, ftle) 3625 ftl_len++; 3626 3627 if (ftl_len < 1) 3628 sbuf_printf(buf, "(none)\n"); 3629 else { 3630 LIST_FOREACH(f, &vsi->ftl, ftle) { 3631 sbuf_printf(buf, 3632 MAC_FORMAT ", vlan %4d, flags %#06x\n", 3633 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3634 } 3635 } 3636 } 3637 } 3638 #endif 3639 3640 error = sbuf_finish(buf); 3641 if (error) 3642 device_printf(dev, "Error finishing sbuf: %d\n", error); 3643 sbuf_delete(buf); 3644 3645 return (error); 3646 } 3647 3648 #define IXL_SW_RES_SIZE 0x14 3649 int 3650 ixl_res_alloc_cmp(const void *a, const void *b) 3651 { 3652 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; 3653 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; 3654 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; 3655 3656 return ((int)one->resource_type - (int)two->resource_type); 3657 } 3658 3659 /* 3660 * Longest string length: 25 3661 */ 3662 const char * 3663 ixl_switch_res_type_string(u8 type) 3664 { 3665 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = { 3666 "VEB", 3667 "VSI", 3668 "Perfect Match MAC address", 3669 "S-tag", 3670 "(Reserved)", 3671 "Multicast hash entry", 3672 "Unicast hash entry", 3673 "VLAN", 3674 "VSI List entry", 3675 "(Reserved)", 3676 "VLAN Statistic Pool", 3677 "Mirror Rule", 3678 "Queue Set", 3679 "Inner VLAN Forward filter", 3680 "(Reserved)", 3681 "Inner MAC", 3682 "IP", 3683 "GRE/VN1 Key", 3684 "VN2 Key", 3685 "Tunneling Port" 3686 }; 3687 3688 if (type < IXL_SW_RES_SIZE) 3689 return ixl_switch_res_type_strings[type]; 3690 else 3691 return "(Reserved)"; 3692 } 3693 3694 static int 3695 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) 3696 { 3697 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3698 struct i40e_hw *hw = &pf->hw; 3699 device_t dev = pf->dev; 3700 struct sbuf *buf; 3701 enum i40e_status_code status; 3702 int error = 0; 3703 3704 u8 num_entries; 3705 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; 3706 3707 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3708 if (!buf) { 3709 device_printf(dev, "Could not allocate sbuf for output.\n"); 3710 return (ENOMEM); 3711 } 3712 3713 bzero(resp, sizeof(resp)); 3714 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, 3715 resp, 3716 IXL_SW_RES_SIZE, 3717 NULL); 3718 if (status) { 3719 device_printf(dev, 3720 "%s: get_switch_resource_alloc() error %s, aq error %s\n", 3721 __func__, i40e_stat_str(hw, status), 3722 i40e_aq_str(hw, hw->aq.asq_last_status)); 3723 sbuf_delete(buf); 3724 return (error); 3725 } 3726 3727 /* Sort entries by type for display */ 3728 qsort(resp, num_entries, 3729 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), 3730 &ixl_res_alloc_cmp); 3731 3732 sbuf_cat(buf, "\n"); 3733 sbuf_printf(buf, "# of entries: %d\n", num_entries); 3734 sbuf_printf(buf, 3735 " Type | Guaranteed | Total | Used | Un-allocated\n" 3736 " | (this) | (all) | (this) | (all) \n"); 3737 for (int i = 0; i < num_entries; i++) { 3738 sbuf_printf(buf, 3739 "%25s | %10d %5d %6d %12d", 3740 ixl_switch_res_type_string(resp[i].resource_type), 3741 resp[i].guaranteed, 3742 resp[i].total, 3743 resp[i].used, 3744 resp[i].total_unalloced); 3745 if (i < num_entries - 1) 3746 sbuf_cat(buf, "\n"); 3747 } 3748 3749 error = sbuf_finish(buf); 3750 if (error) 3751 device_printf(dev, "Error finishing sbuf: %d\n", error); 3752 3753 sbuf_delete(buf); 3754 return (error); 3755 } 3756 3757 enum ixl_sw_seid_offset { 3758 IXL_SW_SEID_EMP = 1, 3759 IXL_SW_SEID_MAC_START = 2, 3760 IXL_SW_SEID_MAC_END = 5, 3761 IXL_SW_SEID_PF_START = 16, 3762 IXL_SW_SEID_PF_END = 31, 3763 IXL_SW_SEID_VF_START = 32, 3764 IXL_SW_SEID_VF_END = 159, 3765 }; 3766 3767 /* 3768 * Caller must init and delete sbuf; this function will clear and 3769 * finish it for caller. 3770 * 3771 * Note: The SEID argument only applies for elements defined by FW at 3772 * power-on; these include the EMP, Ports, PFs and VFs. 3773 */ 3774 static char * 3775 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid) 3776 { 3777 sbuf_clear(s); 3778 3779 /* If SEID is in certain ranges, then we can infer the 3780 * mapping of SEID to switch element. 3781 */ 3782 if (seid == IXL_SW_SEID_EMP) { 3783 sbuf_cat(s, "EMP"); 3784 goto out; 3785 } else if (seid >= IXL_SW_SEID_MAC_START && 3786 seid <= IXL_SW_SEID_MAC_END) { 3787 sbuf_printf(s, "MAC %2d", 3788 seid - IXL_SW_SEID_MAC_START); 3789 goto out; 3790 } else if (seid >= IXL_SW_SEID_PF_START && 3791 seid <= IXL_SW_SEID_PF_END) { 3792 sbuf_printf(s, "PF %3d", 3793 seid - IXL_SW_SEID_PF_START); 3794 goto out; 3795 } else if (seid >= IXL_SW_SEID_VF_START && 3796 seid <= IXL_SW_SEID_VF_END) { 3797 sbuf_printf(s, "VF %3d", 3798 seid - IXL_SW_SEID_VF_START); 3799 goto out; 3800 } 3801 3802 switch (element_type) { 3803 case I40E_AQ_SW_ELEM_TYPE_BMC: 3804 sbuf_cat(s, "BMC"); 3805 break; 3806 case I40E_AQ_SW_ELEM_TYPE_PV: 3807 sbuf_cat(s, "PV"); 3808 break; 3809 case I40E_AQ_SW_ELEM_TYPE_VEB: 3810 sbuf_cat(s, "VEB"); 3811 break; 3812 case I40E_AQ_SW_ELEM_TYPE_PA: 3813 sbuf_cat(s, "PA"); 3814 break; 3815 case I40E_AQ_SW_ELEM_TYPE_VSI: 3816 sbuf_printf(s, "VSI"); 3817 break; 3818 default: 3819 sbuf_cat(s, "?"); 3820 break; 3821 } 3822 3823 out: 3824 sbuf_finish(s); 3825 return sbuf_data(s); 3826 } 3827 3828 static int 3829 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b) 3830 { 3831 const struct i40e_aqc_switch_config_element_resp *one, *two; 3832 one = (const struct i40e_aqc_switch_config_element_resp *)a; 3833 two = (const struct i40e_aqc_switch_config_element_resp *)b; 3834 3835 return ((int)one->seid - (int)two->seid); 3836 } 3837 3838 static int 3839 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) 3840 { 3841 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3842 struct i40e_hw *hw = &pf->hw; 3843 device_t dev = pf->dev; 3844 struct sbuf *buf; 3845 struct sbuf *nmbuf; 3846 enum i40e_status_code status; 3847 int error = 0; 3848 u16 next = 0; 3849 u8 aq_buf[I40E_AQ_LARGE_BUF]; 3850 3851 struct i40e_aqc_switch_config_element_resp *elem; 3852 struct i40e_aqc_get_switch_config_resp *sw_config; 3853 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 3854 3855 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3856 if (!buf) { 3857 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3858 return (ENOMEM); 3859 } 3860 3861 status = i40e_aq_get_switch_config(hw, sw_config, 3862 sizeof(aq_buf), &next, NULL); 3863 if (status) { 3864 device_printf(dev, 3865 "%s: aq_get_switch_config() error %s, aq error %s\n", 3866 __func__, i40e_stat_str(hw, status), 3867 i40e_aq_str(hw, hw->aq.asq_last_status)); 3868 sbuf_delete(buf); 3869 return error; 3870 } 3871 if (next) 3872 device_printf(dev, "%s: TODO: get more config with SEID %d\n", 3873 __func__, next); 3874 3875 nmbuf = sbuf_new_auto(); 3876 if (!nmbuf) { 3877 device_printf(dev, "Could not allocate sbuf for name output.\n"); 3878 sbuf_delete(buf); 3879 return (ENOMEM); 3880 } 3881 3882 /* Sort entries by SEID for display */ 3883 qsort(sw_config->element, sw_config->header.num_reported, 3884 sizeof(struct i40e_aqc_switch_config_element_resp), 3885 &ixl_sw_cfg_elem_seid_cmp); 3886 3887 sbuf_cat(buf, "\n"); 3888 /* Assuming <= 255 elements in switch */ 3889 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); 3890 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); 3891 /* Exclude: 3892 * Revision -- all elements are revision 1 for now 3893 */ 3894 sbuf_printf(buf, 3895 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n" 3896 " | | | (uplink)\n"); 3897 for (int i = 0; i < sw_config->header.num_reported; i++) { 3898 elem = &sw_config->element[i]; 3899 3900 // "%4d (%8s) | %8s %8s %#8x", 3901 sbuf_printf(buf, "%4d", elem->seid); 3902 sbuf_cat(buf, " "); 3903 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3904 elem->element_type, elem->seid)); 3905 sbuf_cat(buf, " | "); 3906 sbuf_printf(buf, "%4d", elem->uplink_seid); 3907 sbuf_cat(buf, " "); 3908 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3909 0, elem->uplink_seid)); 3910 sbuf_cat(buf, " | "); 3911 sbuf_printf(buf, "%4d", elem->downlink_seid); 3912 sbuf_cat(buf, " "); 3913 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3914 0, elem->downlink_seid)); 3915 sbuf_cat(buf, " | "); 3916 sbuf_printf(buf, "%8d", elem->connection_type); 3917 if (i < sw_config->header.num_reported - 1) 3918 sbuf_cat(buf, "\n"); 3919 } 3920 sbuf_delete(nmbuf); 3921 3922 error = sbuf_finish(buf); 3923 if (error) 3924 device_printf(dev, "Error finishing sbuf: %d\n", error); 3925 3926 sbuf_delete(buf); 3927 3928 return (error); 3929 } 3930 3931 static int 3932 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS) 3933 { 3934 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3935 struct i40e_hw *hw = &pf->hw; 3936 device_t dev = pf->dev; 3937 int requested_vlan = -1; 3938 enum i40e_status_code status = 0; 3939 int error = 0; 3940 3941 error = sysctl_handle_int(oidp, &requested_vlan, 0, req); 3942 if ((error) || (req->newptr == NULL)) 3943 return (error); 3944 3945 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) { 3946 device_printf(dev, "Flags disallow setting of vlans\n"); 3947 return (ENODEV); 3948 } 3949 3950 hw->switch_tag = requested_vlan; 3951 device_printf(dev, 3952 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n", 3953 hw->switch_tag, hw->first_tag, hw->second_tag); 3954 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL); 3955 if (status) { 3956 device_printf(dev, 3957 "%s: aq_set_switch_config() error %s, aq error %s\n", 3958 __func__, i40e_stat_str(hw, status), 3959 i40e_aq_str(hw, hw->aq.asq_last_status)); 3960 return (status); 3961 } 3962 return (0); 3963 } 3964 3965 static int 3966 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) 3967 { 3968 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3969 struct i40e_hw *hw = &pf->hw; 3970 device_t dev = pf->dev; 3971 struct sbuf *buf; 3972 int error = 0; 3973 enum i40e_status_code status; 3974 u32 reg; 3975 3976 struct i40e_aqc_get_set_rss_key_data key_data; 3977 3978 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3979 if (!buf) { 3980 device_printf(dev, "Could not allocate sbuf for output.\n"); 3981 return (ENOMEM); 3982 } 3983 3984 bzero(&key_data, sizeof(key_data)); 3985 3986 sbuf_cat(buf, "\n"); 3987 if (hw->mac.type == I40E_MAC_X722) { 3988 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); 3989 if (status) 3990 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", 3991 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3992 } else { 3993 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 3994 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 3995 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); 3996 } 3997 } 3998 3999 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); 4000 4001 error = sbuf_finish(buf); 4002 if (error) 4003 device_printf(dev, "Error finishing sbuf: %d\n", error); 4004 sbuf_delete(buf); 4005 4006 return (error); 4007 } 4008 4009 static void 4010 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) 4011 { 4012 int i, j, k, width; 4013 char c; 4014 4015 if (length < 1 || buf == NULL) return; 4016 4017 int byte_stride = 16; 4018 int lines = length / byte_stride; 4019 int rem = length % byte_stride; 4020 if (rem > 0) 4021 lines++; 4022 4023 for (i = 0; i < lines; i++) { 4024 width = (rem > 0 && i == lines - 1) 4025 ? rem : byte_stride; 4026 4027 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); 4028 4029 for (j = 0; j < width; j++) 4030 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); 4031 4032 if (width < byte_stride) { 4033 for (k = 0; k < (byte_stride - width); k++) 4034 sbuf_printf(sb, " "); 4035 } 4036 4037 if (!text) { 4038 sbuf_printf(sb, "\n"); 4039 continue; 4040 } 4041 4042 for (j = 0; j < width; j++) { 4043 c = (char)buf[i * byte_stride + j]; 4044 if (c < 32 || c > 126) 4045 sbuf_printf(sb, "."); 4046 else 4047 sbuf_printf(sb, "%c", c); 4048 4049 if (j == width - 1) 4050 sbuf_printf(sb, "\n"); 4051 } 4052 } 4053 } 4054 4055 static int 4056 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) 4057 { 4058 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4059 struct i40e_hw *hw = &pf->hw; 4060 device_t dev = pf->dev; 4061 struct sbuf *buf; 4062 int error = 0; 4063 enum i40e_status_code status; 4064 u8 hlut[512]; 4065 u32 reg; 4066 4067 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4068 if (!buf) { 4069 device_printf(dev, "Could not allocate sbuf for output.\n"); 4070 return (ENOMEM); 4071 } 4072 4073 bzero(hlut, sizeof(hlut)); 4074 sbuf_cat(buf, "\n"); 4075 if (hw->mac.type == I40E_MAC_X722) { 4076 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); 4077 if (status) 4078 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", 4079 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 4080 } else { 4081 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { 4082 reg = rd32(hw, I40E_PFQF_HLUT(i)); 4083 bcopy(®, &hlut[i << 2], 4); 4084 } 4085 } 4086 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); 4087 4088 error = sbuf_finish(buf); 4089 if (error) 4090 device_printf(dev, "Error finishing sbuf: %d\n", error); 4091 sbuf_delete(buf); 4092 4093 return (error); 4094 } 4095 4096 static int 4097 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) 4098 { 4099 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4100 struct i40e_hw *hw = &pf->hw; 4101 u64 hena; 4102 4103 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 4104 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 4105 4106 return sysctl_handle_long(oidp, NULL, hena, req); 4107 } 4108 4109 /* 4110 * Sysctl to disable firmware's link management 4111 * 4112 * 1 - Disable link management on this port 4113 * 0 - Re-enable link management 4114 * 4115 * On normal NVMs, firmware manages link by default. 4116 */ 4117 static int 4118 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) 4119 { 4120 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4121 struct i40e_hw *hw = &pf->hw; 4122 device_t dev = pf->dev; 4123 int requested_mode = -1; 4124 enum i40e_status_code status = 0; 4125 int error = 0; 4126 4127 /* Read in new mode */ 4128 error = sysctl_handle_int(oidp, &requested_mode, 0, req); 4129 if ((error) || (req->newptr == NULL)) 4130 return (error); 4131 /* Check for sane value */ 4132 if (requested_mode < 0 || requested_mode > 1) { 4133 device_printf(dev, "Valid modes are 0 or 1\n"); 4134 return (EINVAL); 4135 } 4136 4137 /* Set new mode */ 4138 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); 4139 if (status) { 4140 device_printf(dev, 4141 "%s: Error setting new phy debug mode %s," 4142 " aq error: %s\n", __func__, i40e_stat_str(hw, status), 4143 i40e_aq_str(hw, hw->aq.asq_last_status)); 4144 return (EIO); 4145 } 4146 4147 return (0); 4148 } 4149 4150 /* 4151 * Read some diagnostic data from a (Q)SFP+ module 4152 * 4153 * SFP A2 QSFP Lower Page 4154 * Temperature 96-97 22-23 4155 * Vcc 98-99 26-27 4156 * TX power 102-103 34-35..40-41 4157 * RX power 104-105 50-51..56-57 4158 */ 4159 static int 4160 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 4161 { 4162 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4163 device_t dev = pf->dev; 4164 struct sbuf *sbuf; 4165 int error = 0; 4166 u8 output; 4167 4168 if (req->oldptr == NULL) { 4169 error = SYSCTL_OUT(req, 0, 128); 4170 return (0); 4171 } 4172 4173 error = pf->read_i2c_byte(pf, 0, 0xA0, &output); 4174 if (error) { 4175 device_printf(dev, "Error reading from i2c\n"); 4176 return (error); 4177 } 4178 4179 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 4180 if (output == 0x3) { 4181 /* 4182 * Check for: 4183 * - Internally calibrated data 4184 * - Diagnostic monitoring is implemented 4185 */ 4186 pf->read_i2c_byte(pf, 92, 0xA0, &output); 4187 if (!(output & 0x60)) { 4188 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); 4189 return (0); 4190 } 4191 4192 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4193 4194 for (u8 offset = 96; offset < 100; offset++) { 4195 pf->read_i2c_byte(pf, offset, 0xA2, &output); 4196 sbuf_printf(sbuf, "%02X ", output); 4197 } 4198 for (u8 offset = 102; offset < 106; offset++) { 4199 pf->read_i2c_byte(pf, offset, 0xA2, &output); 4200 sbuf_printf(sbuf, "%02X ", output); 4201 } 4202 } else if (output == 0xD || output == 0x11) { 4203 /* 4204 * QSFP+ modules are always internally calibrated, and must indicate 4205 * what types of diagnostic monitoring are implemented 4206 */ 4207 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4208 4209 for (u8 offset = 22; offset < 24; offset++) { 4210 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4211 sbuf_printf(sbuf, "%02X ", output); 4212 } 4213 for (u8 offset = 26; offset < 28; offset++) { 4214 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4215 sbuf_printf(sbuf, "%02X ", output); 4216 } 4217 /* Read the data from the first lane */ 4218 for (u8 offset = 34; offset < 36; offset++) { 4219 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4220 sbuf_printf(sbuf, "%02X ", output); 4221 } 4222 for (u8 offset = 50; offset < 52; offset++) { 4223 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4224 sbuf_printf(sbuf, "%02X ", output); 4225 } 4226 } else { 4227 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); 4228 return (0); 4229 } 4230 4231 sbuf_finish(sbuf); 4232 sbuf_delete(sbuf); 4233 4234 return (0); 4235 } 4236 4237 /* 4238 * Sysctl to read a byte from I2C bus. 4239 * 4240 * Input: 32-bit value: 4241 * bits 0-7: device address (0xA0 or 0xA2) 4242 * bits 8-15: offset (0-255) 4243 * bits 16-31: unused 4244 * Output: 8-bit value read 4245 */ 4246 static int 4247 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) 4248 { 4249 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4250 device_t dev = pf->dev; 4251 int input = -1, error = 0; 4252 u8 dev_addr, offset, output; 4253 4254 /* Read in I2C read parameters */ 4255 error = sysctl_handle_int(oidp, &input, 0, req); 4256 if ((error) || (req->newptr == NULL)) 4257 return (error); 4258 /* Validate device address */ 4259 dev_addr = input & 0xFF; 4260 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4261 return (EINVAL); 4262 } 4263 offset = (input >> 8) & 0xFF; 4264 4265 error = pf->read_i2c_byte(pf, offset, dev_addr, &output); 4266 if (error) 4267 return (error); 4268 4269 device_printf(dev, "%02X\n", output); 4270 return (0); 4271 } 4272 4273 /* 4274 * Sysctl to write a byte to the I2C bus. 4275 * 4276 * Input: 32-bit value: 4277 * bits 0-7: device address (0xA0 or 0xA2) 4278 * bits 8-15: offset (0-255) 4279 * bits 16-23: value to write 4280 * bits 24-31: unused 4281 * Output: 8-bit value written 4282 */ 4283 static int 4284 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) 4285 { 4286 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4287 device_t dev = pf->dev; 4288 int input = -1, error = 0; 4289 u8 dev_addr, offset, value; 4290 4291 /* Read in I2C write parameters */ 4292 error = sysctl_handle_int(oidp, &input, 0, req); 4293 if ((error) || (req->newptr == NULL)) 4294 return (error); 4295 /* Validate device address */ 4296 dev_addr = input & 0xFF; 4297 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4298 return (EINVAL); 4299 } 4300 offset = (input >> 8) & 0xFF; 4301 value = (input >> 16) & 0xFF; 4302 4303 error = pf->write_i2c_byte(pf, offset, dev_addr, value); 4304 if (error) 4305 return (error); 4306 4307 device_printf(dev, "%02X written\n", value); 4308 return (0); 4309 } 4310 4311 static int 4312 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4313 u8 bit_pos, int *is_set) 4314 { 4315 device_t dev = pf->dev; 4316 struct i40e_hw *hw = &pf->hw; 4317 enum i40e_status_code status; 4318 4319 if (IXL_PF_IN_RECOVERY_MODE(pf)) 4320 return (EIO); 4321 4322 status = i40e_aq_get_phy_capabilities(hw, 4323 FALSE, FALSE, abilities, NULL); 4324 if (status) { 4325 device_printf(dev, 4326 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 4327 __func__, i40e_stat_str(hw, status), 4328 i40e_aq_str(hw, hw->aq.asq_last_status)); 4329 return (EIO); 4330 } 4331 4332 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); 4333 return (0); 4334 } 4335 4336 static int 4337 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4338 u8 bit_pos, int set) 4339 { 4340 device_t dev = pf->dev; 4341 struct i40e_hw *hw = &pf->hw; 4342 struct i40e_aq_set_phy_config config; 4343 enum i40e_status_code status; 4344 4345 /* Set new PHY config */ 4346 memset(&config, 0, sizeof(config)); 4347 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); 4348 if (set) 4349 config.fec_config |= bit_pos; 4350 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { 4351 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 4352 config.phy_type = abilities->phy_type; 4353 config.phy_type_ext = abilities->phy_type_ext; 4354 config.link_speed = abilities->link_speed; 4355 config.eee_capability = abilities->eee_capability; 4356 config.eeer = abilities->eeer_val; 4357 config.low_power_ctrl = abilities->d3_lpan; 4358 status = i40e_aq_set_phy_config(hw, &config, NULL); 4359 4360 if (status) { 4361 device_printf(dev, 4362 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", 4363 __func__, i40e_stat_str(hw, status), 4364 i40e_aq_str(hw, hw->aq.asq_last_status)); 4365 return (EIO); 4366 } 4367 } 4368 4369 return (0); 4370 } 4371 4372 static int 4373 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) 4374 { 4375 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4376 int mode, error = 0; 4377 4378 struct i40e_aq_get_phy_abilities_resp abilities; 4379 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); 4380 if (error) 4381 return (error); 4382 /* Read in new mode */ 4383 error = sysctl_handle_int(oidp, &mode, 0, req); 4384 if ((error) || (req->newptr == NULL)) 4385 return (error); 4386 4387 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); 4388 } 4389 4390 static int 4391 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) 4392 { 4393 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4394 int mode, error = 0; 4395 4396 struct i40e_aq_get_phy_abilities_resp abilities; 4397 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); 4398 if (error) 4399 return (error); 4400 /* Read in new mode */ 4401 error = sysctl_handle_int(oidp, &mode, 0, req); 4402 if ((error) || (req->newptr == NULL)) 4403 return (error); 4404 4405 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); 4406 } 4407 4408 static int 4409 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) 4410 { 4411 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4412 int mode, error = 0; 4413 4414 struct i40e_aq_get_phy_abilities_resp abilities; 4415 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); 4416 if (error) 4417 return (error); 4418 /* Read in new mode */ 4419 error = sysctl_handle_int(oidp, &mode, 0, req); 4420 if ((error) || (req->newptr == NULL)) 4421 return (error); 4422 4423 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); 4424 } 4425 4426 static int 4427 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) 4428 { 4429 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4430 int mode, error = 0; 4431 4432 struct i40e_aq_get_phy_abilities_resp abilities; 4433 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); 4434 if (error) 4435 return (error); 4436 /* Read in new mode */ 4437 error = sysctl_handle_int(oidp, &mode, 0, req); 4438 if ((error) || (req->newptr == NULL)) 4439 return (error); 4440 4441 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); 4442 } 4443 4444 static int 4445 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) 4446 { 4447 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4448 int mode, error = 0; 4449 4450 struct i40e_aq_get_phy_abilities_resp abilities; 4451 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); 4452 if (error) 4453 return (error); 4454 /* Read in new mode */ 4455 error = sysctl_handle_int(oidp, &mode, 0, req); 4456 if ((error) || (req->newptr == NULL)) 4457 return (error); 4458 4459 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); 4460 } 4461 4462 static int 4463 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) 4464 { 4465 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4466 struct i40e_hw *hw = &pf->hw; 4467 device_t dev = pf->dev; 4468 struct sbuf *buf; 4469 int error = 0; 4470 enum i40e_status_code status; 4471 4472 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4473 if (!buf) { 4474 device_printf(dev, "Could not allocate sbuf for output.\n"); 4475 return (ENOMEM); 4476 } 4477 4478 u8 *final_buff; 4479 /* This amount is only necessary if reading the entire cluster into memory */ 4480 #define IXL_FINAL_BUFF_SIZE (1280 * 1024) 4481 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT); 4482 if (final_buff == NULL) { 4483 device_printf(dev, "Could not allocate memory for output.\n"); 4484 goto out; 4485 } 4486 int final_buff_len = 0; 4487 4488 u8 cluster_id = 1; 4489 bool more = true; 4490 4491 u8 dump_buf[4096]; 4492 u16 curr_buff_size = 4096; 4493 u8 curr_next_table = 0; 4494 u32 curr_next_index = 0; 4495 4496 u16 ret_buff_size; 4497 u8 ret_next_table; 4498 u32 ret_next_index; 4499 4500 sbuf_cat(buf, "\n"); 4501 4502 while (more) { 4503 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size, 4504 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL); 4505 if (status) { 4506 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n", 4507 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 4508 goto free_out; 4509 } 4510 4511 /* copy info out of temp buffer */ 4512 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size); 4513 final_buff_len += ret_buff_size; 4514 4515 if (ret_next_table != curr_next_table) { 4516 /* We're done with the current table; we can dump out read data. */ 4517 sbuf_printf(buf, "%d:", curr_next_table); 4518 int bytes_printed = 0; 4519 while (bytes_printed <= final_buff_len) { 4520 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), ""); 4521 bytes_printed += 16; 4522 } 4523 sbuf_cat(buf, "\n"); 4524 4525 /* The entire cluster has been read; we're finished */ 4526 if (ret_next_table == 0xFF) 4527 break; 4528 4529 /* Otherwise clear the output buffer and continue reading */ 4530 bzero(final_buff, IXL_FINAL_BUFF_SIZE); 4531 final_buff_len = 0; 4532 } 4533 4534 if (ret_next_index == 0xFFFFFFFF) 4535 ret_next_index = 0; 4536 4537 bzero(dump_buf, sizeof(dump_buf)); 4538 curr_next_table = ret_next_table; 4539 curr_next_index = ret_next_index; 4540 } 4541 4542 free_out: 4543 free(final_buff, M_IXL); 4544 out: 4545 error = sbuf_finish(buf); 4546 if (error) 4547 device_printf(dev, "Error finishing sbuf: %d\n", error); 4548 sbuf_delete(buf); 4549 4550 return (error); 4551 } 4552 4553 static int 4554 ixl_start_fw_lldp(struct ixl_pf *pf) 4555 { 4556 struct i40e_hw *hw = &pf->hw; 4557 enum i40e_status_code status; 4558 4559 status = i40e_aq_start_lldp(hw, false, NULL); 4560 if (status != I40E_SUCCESS) { 4561 switch (hw->aq.asq_last_status) { 4562 case I40E_AQ_RC_EEXIST: 4563 device_printf(pf->dev, 4564 "FW LLDP agent is already running\n"); 4565 break; 4566 case I40E_AQ_RC_EPERM: 4567 device_printf(pf->dev, 4568 "Device configuration forbids SW from starting " 4569 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII " 4570 "attribute to \"Enabled\" to use this sysctl\n"); 4571 return (EINVAL); 4572 default: 4573 device_printf(pf->dev, 4574 "Starting FW LLDP agent failed: error: %s, %s\n", 4575 i40e_stat_str(hw, status), 4576 i40e_aq_str(hw, hw->aq.asq_last_status)); 4577 return (EINVAL); 4578 } 4579 } 4580 4581 ixl_clear_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED); 4582 return (0); 4583 } 4584 4585 static int 4586 ixl_stop_fw_lldp(struct ixl_pf *pf) 4587 { 4588 struct i40e_hw *hw = &pf->hw; 4589 device_t dev = pf->dev; 4590 enum i40e_status_code status; 4591 4592 if (hw->func_caps.npar_enable != 0) { 4593 device_printf(dev, 4594 "Disabling FW LLDP agent is not supported on this device\n"); 4595 return (EINVAL); 4596 } 4597 4598 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 4599 device_printf(dev, 4600 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); 4601 return (EINVAL); 4602 } 4603 4604 status = i40e_aq_stop_lldp(hw, true, false, NULL); 4605 if (status != I40E_SUCCESS) { 4606 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 4607 device_printf(dev, 4608 "Disabling FW LLDP agent failed: error: %s, %s\n", 4609 i40e_stat_str(hw, status), 4610 i40e_aq_str(hw, hw->aq.asq_last_status)); 4611 return (EINVAL); 4612 } 4613 4614 device_printf(dev, "FW LLDP agent is already stopped\n"); 4615 } 4616 4617 i40e_aq_set_dcb_parameters(hw, true, NULL); 4618 ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED); 4619 return (0); 4620 } 4621 4622 static int 4623 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 4624 { 4625 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4626 int state, new_state, error = 0; 4627 4628 state = new_state = !ixl_test_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED); 4629 4630 /* Read in new mode */ 4631 error = sysctl_handle_int(oidp, &new_state, 0, req); 4632 if ((error) || (req->newptr == NULL)) 4633 return (error); 4634 4635 /* Already in requested state */ 4636 if (new_state == state) 4637 return (error); 4638 4639 if (new_state == 0) 4640 return ixl_stop_fw_lldp(pf); 4641 4642 return ixl_start_fw_lldp(pf); 4643 } 4644 4645 static int 4646 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4647 { 4648 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4649 int state, new_state; 4650 int sysctl_handle_status = 0; 4651 enum i40e_status_code cmd_status; 4652 4653 /* Init states' values */ 4654 state = new_state = ixl_test_state(&pf->state, IXL_STATE_EEE_ENABLED); 4655 4656 /* Get requested mode */ 4657 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); 4658 if ((sysctl_handle_status) || (req->newptr == NULL)) 4659 return (sysctl_handle_status); 4660 4661 /* Check if state has changed */ 4662 if (new_state == state) 4663 return (0); 4664 4665 /* Set new state */ 4666 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); 4667 4668 /* Save new state or report error */ 4669 if (!cmd_status) { 4670 if (new_state == 0) 4671 ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED); 4672 else 4673 ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED); 4674 } else if (cmd_status == I40E_ERR_CONFIG) 4675 return (EPERM); 4676 else 4677 return (EIO); 4678 4679 return (0); 4680 } 4681 4682 static int 4683 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS) 4684 { 4685 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4686 int error, state; 4687 4688 state = ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 4689 4690 error = sysctl_handle_int(oidp, &state, 0, req); 4691 if ((error) || (req->newptr == NULL)) 4692 return (error); 4693 4694 if (state == 0) 4695 ixl_clear_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 4696 else 4697 ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 4698 4699 return (0); 4700 } 4701 4702 4703 int 4704 ixl_attach_get_link_status(struct ixl_pf *pf) 4705 { 4706 struct i40e_hw *hw = &pf->hw; 4707 device_t dev = pf->dev; 4708 enum i40e_status_code status; 4709 4710 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 4711 (hw->aq.fw_maj_ver < 4)) { 4712 i40e_msec_delay(75); 4713 status = i40e_aq_set_link_restart_an(hw, TRUE, NULL); 4714 if (status != I40E_SUCCESS) { 4715 device_printf(dev, 4716 "%s link restart failed status: %s, aq_err=%s\n", 4717 __func__, i40e_stat_str(hw, status), 4718 i40e_aq_str(hw, hw->aq.asq_last_status)); 4719 return (EINVAL); 4720 } 4721 } 4722 4723 /* Determine link state */ 4724 hw->phy.get_link_info = TRUE; 4725 status = i40e_get_link_status(hw, &pf->link_up); 4726 if (status != I40E_SUCCESS) { 4727 device_printf(dev, 4728 "%s get link status, status: %s aq_err=%s\n", 4729 __func__, i40e_stat_str(hw, status), 4730 i40e_aq_str(hw, hw->aq.asq_last_status)); 4731 /* 4732 * Most probably FW has not finished configuring PHY. 4733 * Retry periodically in a timer callback. 4734 */ 4735 ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING); 4736 pf->link_poll_start = getsbinuptime(); 4737 return (EAGAIN); 4738 } 4739 ixl_dbg_link(pf, "%s link_up: %d\n", __func__, pf->link_up); 4740 4741 /* Flow Control mode not set by user, read current FW settings */ 4742 if (pf->fc == -1) 4743 pf->fc = hw->fc.current_mode; 4744 4745 return (0); 4746 } 4747 4748 static int 4749 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS) 4750 { 4751 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4752 int requested = 0, error = 0; 4753 4754 /* Read in new mode */ 4755 error = sysctl_handle_int(oidp, &requested, 0, req); 4756 if ((error) || (req->newptr == NULL)) 4757 return (error); 4758 4759 /* Initiate the PF reset later in the admin task */ 4760 ixl_set_state(&pf->state, IXL_STATE_PF_RESET_REQ); 4761 4762 return (error); 4763 } 4764 4765 static int 4766 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS) 4767 { 4768 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4769 struct i40e_hw *hw = &pf->hw; 4770 int requested = 0, error = 0; 4771 4772 /* Read in new mode */ 4773 error = sysctl_handle_int(oidp, &requested, 0, req); 4774 if ((error) || (req->newptr == NULL)) 4775 return (error); 4776 4777 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 4778 4779 return (error); 4780 } 4781 4782 static int 4783 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS) 4784 { 4785 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4786 struct i40e_hw *hw = &pf->hw; 4787 int requested = 0, error = 0; 4788 4789 /* Read in new mode */ 4790 error = sysctl_handle_int(oidp, &requested, 0, req); 4791 if ((error) || (req->newptr == NULL)) 4792 return (error); 4793 4794 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK); 4795 4796 return (error); 4797 } 4798 4799 /* 4800 * Print out mapping of TX queue indexes and Rx queue indexes 4801 * to MSI-X vectors. 4802 */ 4803 static int 4804 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS) 4805 { 4806 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4807 struct ixl_vsi *vsi = &pf->vsi; 4808 device_t dev = pf->dev; 4809 struct sbuf *buf; 4810 int error = 0; 4811 4812 struct ixl_rx_queue *rx_que = vsi->rx_queues; 4813 struct ixl_tx_queue *tx_que = vsi->tx_queues; 4814 4815 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4816 if (!buf) { 4817 device_printf(dev, "Could not allocate sbuf for output.\n"); 4818 return (ENOMEM); 4819 } 4820 4821 sbuf_cat(buf, "\n"); 4822 for (int i = 0; i < vsi->num_rx_queues; i++) { 4823 rx_que = &vsi->rx_queues[i]; 4824 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix); 4825 } 4826 for (int i = 0; i < vsi->num_tx_queues; i++) { 4827 tx_que = &vsi->tx_queues[i]; 4828 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix); 4829 } 4830 4831 error = sbuf_finish(buf); 4832 if (error) 4833 device_printf(dev, "Error finishing sbuf: %d\n", error); 4834 sbuf_delete(buf); 4835 4836 return (error); 4837 } 4838