/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
static char * ixl_switch_element_string(struct sbuf *, u8, u16);
static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);

/* Sysctls */
static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

/* Functions for setting and checking driver state. Note that the functions
 * take bit positions, not bitmasks. The atomic_set_32 and atomic_clear_32
 * operations require bitmasks, which can easily lead to programming errors,
 * so we provide wrapper functions to avoid them.
 */

/**
 * ixl_set_state - Set the specified state
 * @s: the state bitmap
 * @bit: the state to set
 *
 * Atomically update the state bitmap with the specified bit set.
 */
inline void
ixl_set_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_set_32 expects a bitmask */
	atomic_set_32(s, BIT(bit));
}

/**
 * ixl_clear_state - Clear the specified state
 * @s: the state bitmap
 * @bit: the state to clear
 *
 * Atomically update the state bitmap with the specified bit cleared.
 */
inline void
ixl_clear_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_clear_32 expects a bitmask */
	atomic_clear_32(s, BIT(bit));
}

/**
 * ixl_test_state - Test the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Return true if the state is set, false otherwise. Use this only if the flow
 * does not need to update the state. If you must update the state as well,
 * prefer ixl_testandset_state.
 */
inline bool
ixl_test_state(volatile u32 *s, enum ixl_state bit)
{
	return !!(*s & BIT(bit));
}

/**
 * ixl_testandset_state - Test and set the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Atomically update the state bitmap, setting the specified bit. Returns the
 * previous value of the bit.
 */
inline u32
ixl_testandset_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_testandset_32 expects a bit position, as opposed to the
	 * bitmask expected by the other atomic functions */
	return atomic_testandset_32(s, bit);
}
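
/*
 * Illustrative usage of the state helpers above (a sketch, not code taken
 * from this driver): callers pass bit positions such as
 * IXL_STATE_RECOVERY_MODE and the wrappers build the bitmasks where the
 * underlying atomic(9) operations need them.
 *
 *	if (ixl_test_state(&pf->state, IXL_STATE_RECOVERY_MODE))
 *		return;				// read-only check
 *	if (ixl_testandset_state(&pf->state, IXL_STATE_RECOVERY_MODE) == 0) {
 *		// first caller to set the bit handles the transition
 *	}
 *	ixl_clear_state(&pf->state, IXL_STATE_RECOVERY_MODE);
 */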
MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify the state of FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else {
		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
			fw_mode = IXL_FW_MODE_UEMPR;
	}
	return (fw_mode);
}
/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success, or an error code on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/* Check FW mode again in case it has changed while
	 * waiting for reset to complete */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}

/**
 * ixl_setup_hmc - Setup LAN Host Memory Cache
 * @pf: PF structure
 *
 * Init and configure LAN Host Memory Cache
 *
 * @returns 0 on success, EIO on error
 */
int
ixl_setup_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	return (0);
}

/**
 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
 * @pf: PF structure
 *
 * Shutdown Host Memory Cache if configured.
 */
void
ixl_shutdown_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/* HMC not configured, no need to shutdown */
	if (hw->hmc.hmc_obj == NULL)
		return;

	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(pf->dev,
		    "Shutdown LAN HMC failed with code %s\n",
		    i40e_stat_str(hw, status));
}
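
/*
 * Sketch of how the routines above pair up across the driver lifecycle
 * (an assumed typical ordering for illustration, not a copy of the
 * attach/detach code):
 *
 *	if (ixl_pf_reset(pf) != 0)
 *		return (EIO);			// or enter recovery mode
 *	if (!IXL_PF_IN_RECOVERY_MODE(pf) && ixl_setup_hmc(pf) != 0)
 *		return (EIO);
 *	// ... normal operation ...
 *	ixl_shutdown_hmc(pf);	// no-op if the HMC was never configured
 */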
/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		hw->func_caps.iwarp = 0;
		return (0);
	}

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_IXL);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case IXL_I2C_ACCESS_METHOD_AQ:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Keep link active by default */
	ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}
"MDIO dedicated" : 466 "MDIO shared"); 467 468 return (0); 469 } 470 471 /* For the set_advertise sysctl */ 472 void 473 ixl_set_initial_advertised_speeds(struct ixl_pf *pf) 474 { 475 device_t dev = pf->dev; 476 int err; 477 478 /* Make sure to initialize the device to the complete list of 479 * supported speeds on driver load, to ensure unloading and 480 * reloading the driver will restore this value. 481 */ 482 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); 483 if (err) { 484 /* Non-fatal error */ 485 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", 486 __func__, err); 487 return; 488 } 489 490 pf->advertised_speed = 491 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 492 } 493 494 int 495 ixl_teardown_hw_structs(struct ixl_pf *pf) 496 { 497 enum i40e_status_code status = 0; 498 struct i40e_hw *hw = &pf->hw; 499 device_t dev = pf->dev; 500 501 /* Shutdown LAN HMC */ 502 if (hw->hmc.hmc_obj) { 503 status = i40e_shutdown_lan_hmc(hw); 504 if (status) { 505 device_printf(dev, 506 "init: LAN HMC shutdown failure; status %s\n", 507 i40e_stat_str(hw, status)); 508 goto err_out; 509 } 510 } 511 512 /* Shutdown admin queue */ 513 ixl_disable_intr0(hw); 514 status = i40e_shutdown_adminq(hw); 515 if (status) 516 device_printf(dev, 517 "init: Admin Queue shutdown failure; status %s\n", 518 i40e_stat_str(hw, status)); 519 520 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag); 521 err_out: 522 return (status); 523 } 524 525 /* 526 ** Creates new filter with given MAC address and VLAN ID 527 */ 528 static struct ixl_mac_filter * 529 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan) 530 { 531 struct ixl_mac_filter *f; 532 533 /* create a new empty filter */ 534 f = malloc(sizeof(struct ixl_mac_filter), 535 M_IXL, M_NOWAIT | M_ZERO); 536 if (f) { 537 LIST_INSERT_HEAD(headp, f, ftle); 538 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); 539 f->vlan = vlan; 540 } 541 542 return (f); 543 } 544 545 /** 546 * ixl_free_filters - Free all filters in given list 547 * headp - pointer to list head 548 * 549 * Frees memory used by each entry in the list. 550 * Does not remove filters from HW. 551 */ 552 void 553 ixl_free_filters(struct ixl_ftl_head *headp) 554 { 555 struct ixl_mac_filter *f, *nf; 556 557 f = LIST_FIRST(headp); 558 while (f != NULL) { 559 nf = LIST_NEXT(f, ftle); 560 free(f, M_IXL); 561 f = nf; 562 } 563 564 LIST_INIT(headp); 565 } 566 567 static u_int 568 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 569 { 570 struct ixl_add_maddr_arg *ama = arg; 571 struct ixl_vsi *vsi = ama->vsi; 572 const u8 *macaddr = (u8*)LLADDR(sdl); 573 struct ixl_mac_filter *f; 574 575 /* Does one already exist */ 576 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY); 577 if (f != NULL) 578 return (0); 579 580 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY); 581 if (f == NULL) { 582 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 583 return (0); 584 } 585 f->flags |= IXL_FILTER_MC; 586 587 return (1); 588 } 589 590 /********************************************************************* 591 * Filter Routines 592 * 593 * Routines for multicast and vlan filter management. 
594 * 595 *********************************************************************/ 596 void 597 ixl_add_multi(struct ixl_vsi *vsi) 598 { 599 if_t ifp = vsi->ifp; 600 struct i40e_hw *hw = vsi->hw; 601 int mcnt = 0; 602 struct ixl_add_maddr_arg cb_arg; 603 604 IOCTL_DEBUGOUT("ixl_add_multi: begin"); 605 606 mcnt = if_llmaddr_count(ifp); 607 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { 608 i40e_aq_set_vsi_multicast_promiscuous(hw, 609 vsi->seid, TRUE, NULL); 610 /* delete all existing MC filters */ 611 ixl_del_multi(vsi, true); 612 return; 613 } 614 615 cb_arg.vsi = vsi; 616 LIST_INIT(&cb_arg.to_add); 617 618 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg); 619 if (mcnt > 0) 620 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt); 621 622 IOCTL_DEBUGOUT("ixl_add_multi: end"); 623 } 624 625 static u_int 626 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 627 { 628 struct ixl_mac_filter *f = arg; 629 630 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl))) 631 return (1); 632 else 633 return (0); 634 } 635 636 void 637 ixl_del_multi(struct ixl_vsi *vsi, bool all) 638 { 639 struct ixl_ftl_head to_del; 640 if_t ifp = vsi->ifp; 641 struct ixl_mac_filter *f, *fn; 642 int mcnt = 0; 643 644 IOCTL_DEBUGOUT("ixl_del_multi: begin"); 645 646 LIST_INIT(&to_del); 647 /* Search for removed multicast addresses */ 648 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) { 649 if ((f->flags & IXL_FILTER_MC) == 0 || 650 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0))) 651 continue; 652 653 LIST_REMOVE(f, ftle); 654 LIST_INSERT_HEAD(&to_del, f, ftle); 655 mcnt++; 656 } 657 658 if (mcnt > 0) 659 ixl_del_hw_filters(vsi, &to_del, mcnt); 660 } 661 662 void 663 ixl_link_up_msg(struct ixl_pf *pf) 664 { 665 struct i40e_hw *hw = &pf->hw; 666 if_t ifp = pf->vsi.ifp; 667 char *req_fec_string, *neg_fec_string; 668 u8 fec_abilities; 669 670 fec_abilities = hw->phy.link_info.req_fec_info; 671 /* If both RS and KR are requested, only show RS */ 672 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) 673 req_fec_string = ixl_fec_string[0]; 674 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) 675 req_fec_string = ixl_fec_string[1]; 676 else 677 req_fec_string = ixl_fec_string[2]; 678 679 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) 680 neg_fec_string = ixl_fec_string[0]; 681 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) 682 neg_fec_string = ixl_fec_string[1]; 683 else 684 neg_fec_string = ixl_fec_string[2]; 685 686 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 687 if_name(ifp), 688 ixl_link_speed_string(hw->phy.link_info.link_speed), 689 req_fec_string, neg_fec_string, 690 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", 691 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && 692 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 693 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? 694 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 695 ixl_fc_string[1] : ixl_fc_string[0]); 696 } 697 698 /* 699 * Configure admin queue/misc interrupt cause registers in hardware. 
/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
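
/*
 * The ITR0 write above encodes the interval in 2-usec units: 0x3E is 62,
 * and 62 * 2 = 124 usecs. A hypothetical conversion helper (illustration
 * only; this macro is not part of the driver):
 *
 *	#define IXL_USEC_TO_ITR_REG(usec)	((usec) / 2)
 *
 *	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), IXL_USEC_TO_ITR_REG(124));
 */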
void
ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8 aq_buf[I40E_AQ_LARGE_BUF];
	int ret;
	u16 next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}
void
ixl_vsi_add_sysctls(struct ixl_vsi *vsi, const char *sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	/* Copy of netstat RX errors counter for validation purposes */
	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
	    CTLFLAG_RD, &vsi->ierrors,
	    "RX packet errors");

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}
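
/*
 * The two handlers above back per-device sysctls. Illustrative usage from
 * userland, assuming unit 0 and the usual "tx_itr"/"rx_itr" OID names given
 * where these sysctls are attached (values in the range 0..IXL_MAX_ITR, as
 * enforced above):
 *
 *	# sysctl dev.ixl.0.tx_itr=122
 *	# sysctl dev.ixl.0.rx_itr=62
 */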
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}
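
/*
 * Note on the 52-byte bcopy above: struct i40e_aqc_get_set_rss_key_data
 * carries a 40-byte standard RSS key followed by a 12-byte extended hash
 * key, and 40 + 12 = 52. An equivalent, self-checking form of the copy
 * (a sketch for illustration, not driver code):
 *
 *	CTASSERT(sizeof(struct i40e_aqc_get_set_rss_key_data) == 52);
 *	bcopy(rss_seed, &key_data, sizeof(key_data));
 */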
/*
 * Configure enabled PCTYPES for RSS.
 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64 set_hena = 0, hena;

#ifdef RSS
	u32 rss_hash_config;

	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}

/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}
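
/*
 * HENA is a 64-bit enable bitmap split across two 32-bit registers, which
 * is why ixl_set_rss_pctypes() above reads, merges, and writes both halves.
 * Condensed sketch of that read-modify-write (illustration only):
 *
 *	u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
 *	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
 *	hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 *	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
 *	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 */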
/*
 * In some firmware versions there is a default MAC/VLAN filter
 * configured which interferes with the filters managed by the driver.
 * Make sure it's removed.
 */
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
	struct i40e_aqc_remove_macvlan_element_data e;

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
	    I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	LIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	if (IXL_VSI_IS_VF(vsi))
		return;

	ixl_del_default_hw_filters(vsi);

	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}

void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_ftl_head tmp;
	int cnt;

	/*
	 * The ixl_add_hw_filters function adds filters configured
	 * in HW to a list in VSI. Move all filters to a temporary
	 * list to avoid corrupting it by concatenating to itself.
	 */
	LIST_INIT(&tmp);
	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
	cnt = vsi->num_hw_filters;
	vsi->num_hw_filters = 0;

	ixl_add_hw_filters(vsi, &tmp, cnt);

	/*
	 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
	 * will be NULL. Furthermore, the ftl of such a vsi already contains
	 * the IXL_VLAN_ANY filter so we can skip that as well.
	 */
	if (hw == NULL)
		return;

	/* Filter could be removed if MAC address was changed */
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * VLAN HW filtering is enabled, make sure that filters
	 * for all registered VLAN tags are configured
	 */
	ixl_add_vlan_filters(vsi, hw->mac.addr);
}
/*
 * This routine adds a MAC/VLAN filter to the software filter
 * list, then adds that new filter to the HW if it doesn't already
 * exist in the SW filter list.
 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_pf *pf;
	device_t dev;
	struct ixl_ftl_head to_add;
	int to_add_cnt;

	pf = vsi->back;
	dev = pf->dev;
	to_add_cnt = 1;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist */
	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f != NULL)
		return;

	LIST_INIT(&to_add);
	f = ixl_new_filter(&to_add, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/*
	** If this is the first vlan being registered, we need to remove
	** the ANY filter that indicates we are not in a vlan, and replace
	** it with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			struct ixl_ftl_head to_del;

			/* Prepare new filter first to avoid removing
			 * VLAN_ANY filter if allocation fails */
			f = ixl_new_filter(&to_add, macaddr, 0);
			if (f == NULL) {
				device_printf(dev, "WARNING: no filter available!!\n");
				free(LIST_FIRST(&to_add), M_IXL);
				return;
			}
			to_add_cnt++;

			LIST_REMOVE(tmp, ftle);
			LIST_INIT(&to_del);
			LIST_INSERT_HEAD(&to_del, tmp, ftle);
			ixl_del_hw_filters(vsi, &to_del, 1);
		}
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

/**
 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
 * @vsi: pointer to VSI
 * @macaddr: MAC address
 *
 * Adds a MAC/VLAN filter for each VLAN configured on the interface
 * if there are enough HW filters. Otherwise adds a single filter
 * for all tagged and untagged frames to allow all configured VLANs
 * to receive traffic.
 */
void
ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_ftl_head to_add;
	struct ixl_mac_filter *f;
	int to_add_cnt = 0;
	int i, vlan = 0;

	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
		return;
	}
	LIST_INIT(&to_add);

	/* Add filter for untagged frames if it does not exist yet */
	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (f == NULL) {
		f = ixl_new_filter(&to_add, macaddr, 0);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			return;
		}
		to_add_cnt++;
	}

	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
		if (vlan == -1)
			break;

		/* Does one already exist */
		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
		if (f != NULL)
			continue;

		f = ixl_new_filter(&to_add, macaddr, vlan);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			ixl_free_filters(&to_add);
			return;
		}
		to_add_cnt++;
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head ftl_head;
	int to_del_cnt = 1;

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f == NULL)
		return;

	LIST_REMOVE(f, ftle);
	LIST_INIT(&ftl_head);
	LIST_INSERT_HEAD(&ftl_head, f, ftle);
	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
		vsi->num_macs--;

	/* If this is not the last vlan just remove the filter */
	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
		return;
	}

	/* It's the last vlan, we need to switch back to a non-vlan filter */
	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (tmp != NULL) {
		LIST_REMOVE(tmp, ftle);
		LIST_INSERT_AFTER(f, tmp, ftle);
		to_del_cnt++;
	}
	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);

	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
}
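
/*
 * Summary of the VLAN/ANY transitions implemented by ixl_add_filter() and
 * ixl_del_filter() above (illustration of the state machine only):
 *
 *	no VLANs registered    -> single (MAC, IXL_VLAN_ANY) filter
 *	first VLAN registered  -> ANY filter replaced by (MAC, 0) for
 *	                          untagged frames plus one (MAC, vlan)
 *	last VLAN unregistered -> per-tag and (MAC, 0) filters removed,
 *	                          (MAC, IXL_VLAN_ANY) restored
 */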
/**
 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
 * @vsi: VSI which filters need to be removed
 * @macaddr: MAC address
 *
 * Remove all MAC/VLAN filters with a given MAC address. For multicast
 * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY)
 * so skip them to speed up processing. Those filters should be removed
 * using the ixl_del_filter function.
 */
void
ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head to_del;
	int to_del_cnt = 0;

	LIST_INIT(&to_del);

	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
		if ((f->flags & IXL_FILTER_MC) != 0 ||
		    !ixl_ether_is_equal(f->macaddr, macaddr))
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
	if (to_del_cnt > 0)
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
}

/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	LIST_FOREACH(f, headp, ftle) {
		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
		    (f->vlan == vlan)) {
			return (f);
		}
	}

	return (NULL);
}
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter *f, *fn;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	LIST_FOREACH(f, to_add, ftle) {
		b = &a[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
		if (f->vlan == IXL_VLAN_ANY) {
			b->vlan_tag = 0;
			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
		} else {
			b->vlan_tag = f->vlan;
			b->flags = 0;
		}
		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
		/* Some FW versions do not set match method
		 * when adding filters fails. Initialize it with
		 * the expected error value to allow detecting which
		 * filters were not added */
		b->match_method = I40E_AQC_MM_ERR_NO_RES;
		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		if (++j == cnt)
			break;
	}
	if (j != cnt) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: list of filters too short; expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_add);
		goto out_free;
	}

	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
	if (status == I40E_SUCCESS) {
		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
		vsi->num_hw_filters += j;
		goto out_free;
	}

	device_printf(dev,
	    "i40e_aq_add_macvlan status %s, error %s\n",
	    i40e_stat_str(hw, status),
	    i40e_aq_str(hw, hw->aq.asq_last_status));
	j = 0;

	/* Verify which filters were actually configured in HW
	 * and add them to the list */
	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
		LIST_REMOVE(f, ftle);
		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
			ixl_dbg_filter(pf,
			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
			    __func__,
			    MAC_FORMAT_ARGS(f->macaddr),
			    f->vlan);
			free(f, M_IXL);
		} else {
			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
			vsi->num_hw_filters++;
		}
		j++;
	}

out_free:
	free(a, M_IXL);
}
/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	struct ixl_mac_filter *f, *f_temp;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
		e = &d[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		if (f->vlan == IXL_VLAN_ANY) {
			e->vlan_tag = 0;
			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
		} else {
			e->vlan_tag = f->vlan;
		}

		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		/* delete entry from the list */
		LIST_REMOVE(f, ftle);
		free(f, M_IXL);
		if (++j == cnt)
			break;
	}
	if (j != cnt || !LIST_EMPTY(to_del)) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_del);
		goto out_free;
	}
	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		for (int i = 0; i < j; i++) {
			if (d[i].error_code == 0)
				continue;
			device_printf(dev,
			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
			    d[i].vlan_tag);
		}
	}

	vsi->num_hw_filters -= j;

out_free:
	free(d, M_IXL);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
}

int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
	    I40E_QRX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "RX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}
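
/*
 * The ring enable/disable routines here share one poll-until-settled
 * pattern: set or clear QENA_REQ, then poll QENA_STAT up to 10 times
 * before declaring ETIMEDOUT. A condensed sketch of the enable-side loop
 * (illustration only; the driver keeps these loops inline, and the disable
 * paths below use i40e_msec_delay between polls instead):
 *
 *	for (int j = 0; j < 10; j++) {
 *		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
 *		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
 *			break;
 *		i40e_usec_delay(10);
 *	}
 */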
/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
	i40e_usec_delay(500);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "TX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "RX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}
i++) { 1813 vf = &(pf->vfs[i]); 1814 reg = rd32(hw, I40E_VP_MDET_TX(i)); 1815 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 1816 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 1817 vp_mdet_num = i; 1818 vf->num_mdd_events++; 1819 vf_mdd_detected = true; 1820 } 1821 } 1822 1823 /* Print out an error message */ 1824 if (vf_mdd_detected && pf_mdd_detected) 1825 device_printf(dev, 1826 "Malicious Driver Detection event %d" 1827 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n", 1828 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num); 1829 else if (vf_mdd_detected && !pf_mdd_detected) 1830 device_printf(dev, 1831 "Malicious Driver Detection event %d" 1832 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n", 1833 event, queue, pf_num, vf_num, vp_mdet_num); 1834 else if (!vf_mdd_detected && pf_mdd_detected) 1835 device_printf(dev, 1836 "Malicious Driver Detection event %d" 1837 " on TX queue %d, pf number %d (PF-%d)\n", 1838 event, queue, pf_num, pf_mdet_num); 1839 /* Theoretically shouldn't happen */ 1840 else 1841 device_printf(dev, 1842 "TX Malicious Driver Detection event (unknown)\n"); 1843 } 1844 1845 static void 1846 ixl_handle_rx_mdd_event(struct ixl_pf *pf) 1847 { 1848 struct i40e_hw *hw = &pf->hw; 1849 device_t dev = pf->dev; 1850 struct ixl_vf *vf; 1851 bool mdd_detected = false; 1852 bool pf_mdd_detected = false; 1853 bool vf_mdd_detected = false; 1854 u16 queue; 1855 u8 pf_num, event; 1856 u8 pf_mdet_num, vp_mdet_num; 1857 u32 reg; 1858 1859 /* 1860 * GL_MDET_RX doesn't contain VF number information, unlike 1861 * GL_MDET_TX. 1862 */ 1863 reg = rd32(hw, I40E_GL_MDET_RX); 1864 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 1865 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 1866 I40E_GL_MDET_RX_FUNCTION_SHIFT; 1867 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 1868 I40E_GL_MDET_RX_EVENT_SHIFT; 1869 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 1870 I40E_GL_MDET_RX_QUEUE_SHIFT; 1871 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 1872 mdd_detected = true; 1873 } 1874 1875 if (!mdd_detected) 1876 return; 1877 1878 reg = rd32(hw, I40E_PF_MDET_RX); 1879 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 1880 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 1881 pf_mdet_num = hw->pf_id; 1882 pf_mdd_detected = true; 1883 } 1884 1885 /* Check if MDD was caused by a VF */ 1886 for (int i = 0; i < pf->num_vfs; i++) { 1887 vf = &(pf->vfs[i]); 1888 reg = rd32(hw, I40E_VP_MDET_RX(i)); 1889 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 1890 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 1891 vp_mdet_num = i; 1892 vf->num_mdd_events++; 1893 vf_mdd_detected = true; 1894 } 1895 } 1896 1897 /* Print out an error message */ 1898 if (vf_mdd_detected && pf_mdd_detected) 1899 device_printf(dev, 1900 "Malicious Driver Detection event %d" 1901 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n", 1902 event, queue, pf_num, pf_mdet_num, vp_mdet_num); 1903 else if (vf_mdd_detected && !pf_mdd_detected) 1904 device_printf(dev, 1905 "Malicious Driver Detection event %d" 1906 " on RX queue %d, pf number %d, (VF-%d)\n", 1907 event, queue, pf_num, vp_mdet_num); 1908 else if (!vf_mdd_detected && pf_mdd_detected) 1909 device_printf(dev, 1910 "Malicious Driver Detection event %d" 1911 " on RX queue %d, pf number %d (PF-%d)\n", 1912 event, queue, pf_num, pf_mdet_num); 1913 /* Theoretically shouldn't happen */ 1914 else 1915 device_printf(dev, 1916 "RX Malicious Driver Detection event (unknown)\n"); 1917 } 1918 1919 /** 1920 * ixl_handle_mdd_event 1921 * 1922 * Called from interrupt handler to identify possibly malicious vfs 1923 * (But also detects events from the 
PF.)
1924  **/
1925 void
1926 ixl_handle_mdd_event(struct ixl_pf *pf)
1927 {
1928 	struct i40e_hw *hw = &pf->hw;
1929 	u32 reg;
1930 
1931 	/*
1932 	 * Handle both TX/RX because it's possible they could
1933 	 * both trigger in the same interrupt.
1934 	 */
1935 	ixl_handle_tx_mdd_event(pf);
1936 	ixl_handle_rx_mdd_event(pf);
1937 
1938 	ixl_clear_state(&pf->state, IXL_STATE_MDD_PENDING);
1939 
1940 	/* re-enable mdd interrupt cause */
1941 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1942 	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1943 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1944 	ixl_flush(hw);
1945 }
1946 
1947 void
1948 ixl_enable_intr0(struct i40e_hw *hw)
1949 {
1950 	u32 reg;
1951 
1952 	/* Use IXL_ITR_NONE so ITR isn't updated here */
1953 	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1954 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1955 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1956 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1957 }
1958 
1959 void
1960 ixl_disable_intr0(struct i40e_hw *hw)
1961 {
1962 	u32 reg;
1963 
1964 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1965 	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
1966 	ixl_flush(hw);
1967 }
1968 
1969 void
1970 ixl_enable_queue(struct i40e_hw *hw, int id)
1971 {
1972 	u32 reg;
1973 
1974 	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1975 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1976 	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1977 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1978 }
1979 
1980 void
1981 ixl_disable_queue(struct i40e_hw *hw, int id)
1982 {
1983 	u32 reg;
1984 
1985 	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1986 	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
1987 }
1988 
1989 void
1990 ixl_handle_empr_reset(struct ixl_pf *pf)
1991 {
1992 	struct ixl_vsi *vsi = &pf->vsi;
1993 	bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING);
1994 
1995 	ixl_prepare_for_reset(pf, is_up);
1996 	/*
1997 	 * i40e_pf_reset checks the type of reset and acts
1998 	 * accordingly. If an EMP or Core reset was performed,
1999 	 * a PF reset is not necessary and it sometimes
2000 	 * fails.
2001 	 */
2002 	ixl_pf_reset(pf);
2003 
2004 	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
2005 	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
2006 		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
2007 		device_printf(pf->dev,
2008 		    "Firmware recovery mode detected. Limiting functionality.
Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 2009 pf->link_up = FALSE; 2010 ixl_update_link_status(pf); 2011 } 2012 2013 ixl_rebuild_hw_structs_after_reset(pf, is_up); 2014 2015 ixl_clear_state(&pf->state, IXL_STATE_RESETTING); 2016 } 2017 2018 void 2019 ixl_update_stats_counters(struct ixl_pf *pf) 2020 { 2021 struct i40e_hw *hw = &pf->hw; 2022 struct ixl_vsi *vsi = &pf->vsi; 2023 struct ixl_vf *vf; 2024 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx; 2025 2026 struct i40e_hw_port_stats *nsd = &pf->stats; 2027 struct i40e_hw_port_stats *osd = &pf->stats_offsets; 2028 2029 /* Update hw stats */ 2030 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), 2031 pf->stat_offsets_loaded, 2032 &osd->crc_errors, &nsd->crc_errors); 2033 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), 2034 pf->stat_offsets_loaded, 2035 &osd->illegal_bytes, &nsd->illegal_bytes); 2036 ixl_stat_update48(hw, I40E_GLPRT_GORCL(hw->port), 2037 pf->stat_offsets_loaded, 2038 &osd->eth.rx_bytes, &nsd->eth.rx_bytes); 2039 ixl_stat_update48(hw, I40E_GLPRT_GOTCL(hw->port), 2040 pf->stat_offsets_loaded, 2041 &osd->eth.tx_bytes, &nsd->eth.tx_bytes); 2042 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), 2043 pf->stat_offsets_loaded, 2044 &osd->eth.rx_discards, 2045 &nsd->eth.rx_discards); 2046 ixl_stat_update48(hw, I40E_GLPRT_UPRCL(hw->port), 2047 pf->stat_offsets_loaded, 2048 &osd->eth.rx_unicast, 2049 &nsd->eth.rx_unicast); 2050 ixl_stat_update48(hw, I40E_GLPRT_UPTCL(hw->port), 2051 pf->stat_offsets_loaded, 2052 &osd->eth.tx_unicast, 2053 &nsd->eth.tx_unicast); 2054 ixl_stat_update48(hw, I40E_GLPRT_MPRCL(hw->port), 2055 pf->stat_offsets_loaded, 2056 &osd->eth.rx_multicast, 2057 &nsd->eth.rx_multicast); 2058 ixl_stat_update48(hw, I40E_GLPRT_MPTCL(hw->port), 2059 pf->stat_offsets_loaded, 2060 &osd->eth.tx_multicast, 2061 &nsd->eth.tx_multicast); 2062 ixl_stat_update48(hw, I40E_GLPRT_BPRCL(hw->port), 2063 pf->stat_offsets_loaded, 2064 &osd->eth.rx_broadcast, 2065 &nsd->eth.rx_broadcast); 2066 ixl_stat_update48(hw, I40E_GLPRT_BPTCL(hw->port), 2067 pf->stat_offsets_loaded, 2068 &osd->eth.tx_broadcast, 2069 &nsd->eth.tx_broadcast); 2070 2071 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), 2072 pf->stat_offsets_loaded, 2073 &osd->tx_dropped_link_down, 2074 &nsd->tx_dropped_link_down); 2075 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), 2076 pf->stat_offsets_loaded, 2077 &osd->mac_local_faults, 2078 &nsd->mac_local_faults); 2079 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), 2080 pf->stat_offsets_loaded, 2081 &osd->mac_remote_faults, 2082 &nsd->mac_remote_faults); 2083 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), 2084 pf->stat_offsets_loaded, 2085 &osd->rx_length_errors, 2086 &nsd->rx_length_errors); 2087 2088 /* Flow control (LFC) stats */ 2089 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), 2090 pf->stat_offsets_loaded, 2091 &osd->link_xon_rx, &nsd->link_xon_rx); 2092 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), 2093 pf->stat_offsets_loaded, 2094 &osd->link_xon_tx, &nsd->link_xon_tx); 2095 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), 2096 pf->stat_offsets_loaded, 2097 &osd->link_xoff_rx, &nsd->link_xoff_rx); 2098 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), 2099 pf->stat_offsets_loaded, 2100 &osd->link_xoff_tx, &nsd->link_xoff_tx); 2101 2102 /* 2103 * For watchdog management we need to know if we have been paused 2104 * during the last interval, so capture that here. 
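 * Setting isc_pause_frames lets the iflib TX watchdog know the link
 * was paused during this interval, so a stalled queue is not
 * immediately declared hung.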
2105 	 */
2106 	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2107 		vsi->shared->isc_pause_frames = 1;
2108 
2109 	/* Packet size stats rx */
2110 	ixl_stat_update48(hw, I40E_GLPRT_PRC64L(hw->port),
2111 	    pf->stat_offsets_loaded,
2112 	    &osd->rx_size_64, &nsd->rx_size_64);
2113 	ixl_stat_update48(hw, I40E_GLPRT_PRC127L(hw->port),
2114 	    pf->stat_offsets_loaded,
2115 	    &osd->rx_size_127, &nsd->rx_size_127);
2116 	ixl_stat_update48(hw, I40E_GLPRT_PRC255L(hw->port),
2117 	    pf->stat_offsets_loaded,
2118 	    &osd->rx_size_255, &nsd->rx_size_255);
2119 	ixl_stat_update48(hw, I40E_GLPRT_PRC511L(hw->port),
2120 	    pf->stat_offsets_loaded,
2121 	    &osd->rx_size_511, &nsd->rx_size_511);
2122 	ixl_stat_update48(hw, I40E_GLPRT_PRC1023L(hw->port),
2123 	    pf->stat_offsets_loaded,
2124 	    &osd->rx_size_1023, &nsd->rx_size_1023);
2125 	ixl_stat_update48(hw, I40E_GLPRT_PRC1522L(hw->port),
2126 	    pf->stat_offsets_loaded,
2127 	    &osd->rx_size_1522, &nsd->rx_size_1522);
2128 	ixl_stat_update48(hw, I40E_GLPRT_PRC9522L(hw->port),
2129 	    pf->stat_offsets_loaded,
2130 	    &osd->rx_size_big, &nsd->rx_size_big);
2131 
2132 	/* Packet size stats tx */
2133 	ixl_stat_update48(hw, I40E_GLPRT_PTC64L(hw->port),
2134 	    pf->stat_offsets_loaded,
2135 	    &osd->tx_size_64, &nsd->tx_size_64);
2136 	ixl_stat_update48(hw, I40E_GLPRT_PTC127L(hw->port),
2137 	    pf->stat_offsets_loaded,
2138 	    &osd->tx_size_127, &nsd->tx_size_127);
2139 	ixl_stat_update48(hw, I40E_GLPRT_PTC255L(hw->port),
2140 	    pf->stat_offsets_loaded,
2141 	    &osd->tx_size_255, &nsd->tx_size_255);
2142 	ixl_stat_update48(hw, I40E_GLPRT_PTC511L(hw->port),
2143 	    pf->stat_offsets_loaded,
2144 	    &osd->tx_size_511, &nsd->tx_size_511);
2145 	ixl_stat_update48(hw, I40E_GLPRT_PTC1023L(hw->port),
2146 	    pf->stat_offsets_loaded,
2147 	    &osd->tx_size_1023, &nsd->tx_size_1023);
2148 	ixl_stat_update48(hw, I40E_GLPRT_PTC1522L(hw->port),
2149 	    pf->stat_offsets_loaded,
2150 	    &osd->tx_size_1522, &nsd->tx_size_1522);
2151 	ixl_stat_update48(hw, I40E_GLPRT_PTC9522L(hw->port),
2152 	    pf->stat_offsets_loaded,
2153 	    &osd->tx_size_big, &nsd->tx_size_big);
2154 
2155 	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2156 	    pf->stat_offsets_loaded,
2157 	    &osd->rx_undersize, &nsd->rx_undersize);
2158 	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2159 	    pf->stat_offsets_loaded,
2160 	    &osd->rx_fragments, &nsd->rx_fragments);
2161 
2162 	u64 rx_roc;
2163 	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2164 	    pf->stat_offsets_loaded,
2165 	    &osd->rx_oversize, &rx_roc);
2166 
2167 	/*
2168 	 * Read from RXERR1 register to get the count for the packets
2169 	 * larger than RX MAX and include that in total rx_oversize count.
2170 	 *
2171 	 * Also need to add BIT(7) to hw->pf_id value while indexing
2172 	 * I40E_GL_RXERR1 register as indexes 0..127 are for VFs when
2173 	 * SR-IOV is enabled. Indexes 128..143 are for PFs.
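 * (BIT(7) == 128, hence the hw->pf_id + BIT(7) index used below.)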
2174 */ 2175 u64 rx_err1; 2176 ixl_stat_update64(hw, 2177 I40E_GL_RXERR1L(hw->pf_id + BIT(7)), 2178 pf->stat_offsets_loaded, 2179 &osd->rx_err1, 2180 &rx_err1); 2181 2182 nsd->rx_oversize = rx_roc + rx_err1; 2183 2184 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 2185 pf->stat_offsets_loaded, 2186 &osd->rx_jabber, &nsd->rx_jabber); 2187 /* EEE */ 2188 i40e_get_phy_lpi_status(hw, nsd); 2189 2190 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded, 2191 &osd->tx_lpi_count, &nsd->tx_lpi_count, 2192 &osd->rx_lpi_count, &nsd->rx_lpi_count); 2193 2194 pf->stat_offsets_loaded = true; 2195 /* End hw stats */ 2196 2197 /* Update vsi stats */ 2198 ixl_update_vsi_stats(vsi); 2199 2200 for (int i = 0; i < pf->num_vfs; i++) { 2201 vf = &pf->vfs[i]; 2202 if (vf->vf_flags & VF_FLAG_ENABLED) 2203 ixl_update_eth_stats(&pf->vfs[i].vsi); 2204 } 2205 } 2206 2207 /** 2208 * Update VSI-specific ethernet statistics counters. 2209 **/ 2210 void 2211 ixl_update_eth_stats(struct ixl_vsi *vsi) 2212 { 2213 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 2214 struct i40e_hw *hw = &pf->hw; 2215 struct i40e_eth_stats *es; 2216 struct i40e_eth_stats *oes; 2217 u16 stat_idx = vsi->info.stat_counter_idx; 2218 2219 es = &vsi->eth_stats; 2220 oes = &vsi->eth_stats_offsets; 2221 2222 /* Gather up the stats that the hw collects */ 2223 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), 2224 vsi->stat_offsets_loaded, 2225 &oes->tx_errors, &es->tx_errors); 2226 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 2227 vsi->stat_offsets_loaded, 2228 &oes->rx_discards, &es->rx_discards); 2229 2230 ixl_stat_update48(hw, I40E_GLV_GORCL(stat_idx), 2231 vsi->stat_offsets_loaded, 2232 &oes->rx_bytes, &es->rx_bytes); 2233 ixl_stat_update48(hw, I40E_GLV_UPRCL(stat_idx), 2234 vsi->stat_offsets_loaded, 2235 &oes->rx_unicast, &es->rx_unicast); 2236 ixl_stat_update48(hw, I40E_GLV_MPRCL(stat_idx), 2237 vsi->stat_offsets_loaded, 2238 &oes->rx_multicast, &es->rx_multicast); 2239 ixl_stat_update48(hw, I40E_GLV_BPRCL(stat_idx), 2240 vsi->stat_offsets_loaded, 2241 &oes->rx_broadcast, &es->rx_broadcast); 2242 2243 ixl_stat_update48(hw, I40E_GLV_GOTCL(stat_idx), 2244 vsi->stat_offsets_loaded, 2245 &oes->tx_bytes, &es->tx_bytes); 2246 ixl_stat_update48(hw, I40E_GLV_UPTCL(stat_idx), 2247 vsi->stat_offsets_loaded, 2248 &oes->tx_unicast, &es->tx_unicast); 2249 ixl_stat_update48(hw, I40E_GLV_MPTCL(stat_idx), 2250 vsi->stat_offsets_loaded, 2251 &oes->tx_multicast, &es->tx_multicast); 2252 ixl_stat_update48(hw, I40E_GLV_BPTCL(stat_idx), 2253 vsi->stat_offsets_loaded, 2254 &oes->tx_broadcast, &es->tx_broadcast); 2255 vsi->stat_offsets_loaded = true; 2256 } 2257 2258 void 2259 ixl_update_vsi_stats(struct ixl_vsi *vsi) 2260 { 2261 struct ixl_pf *pf; 2262 struct i40e_eth_stats *es; 2263 u64 tx_discards, csum_errs; 2264 2265 struct i40e_hw_port_stats *nsd; 2266 2267 pf = vsi->back; 2268 es = &vsi->eth_stats; 2269 nsd = &pf->stats; 2270 2271 ixl_update_eth_stats(vsi); 2272 2273 tx_discards = es->tx_discards + nsd->tx_dropped_link_down; 2274 2275 csum_errs = 0; 2276 for (int i = 0; i < vsi->num_rx_queues; i++) 2277 csum_errs += vsi->rx_queues[i].rxr.csum_errs; 2278 nsd->checksum_error = csum_errs; 2279 2280 /* Update ifnet stats */ 2281 IXL_SET_IPACKETS(vsi, es->rx_unicast + 2282 es->rx_multicast + 2283 es->rx_broadcast); 2284 IXL_SET_OPACKETS(vsi, es->tx_unicast + 2285 es->tx_multicast + 2286 es->tx_broadcast); 2287 IXL_SET_IBYTES(vsi, es->rx_bytes); 2288 IXL_SET_OBYTES(vsi, es->tx_bytes); 2289 IXL_SET_IMCASTS(vsi, es->rx_multicast); 2290 IXL_SET_OMCASTS(vsi, 
es->tx_multicast); 2291 2292 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + 2293 nsd->checksum_error + nsd->rx_length_errors + 2294 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize + 2295 nsd->rx_jabber); 2296 IXL_SET_OERRORS(vsi, es->tx_errors); 2297 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); 2298 IXL_SET_OQDROPS(vsi, tx_discards); 2299 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); 2300 IXL_SET_COLLISIONS(vsi, 0); 2301 } 2302 2303 /** 2304 * Reset all of the stats for the given pf 2305 **/ 2306 void 2307 ixl_pf_reset_stats(struct ixl_pf *pf) 2308 { 2309 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); 2310 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); 2311 pf->stat_offsets_loaded = false; 2312 } 2313 2314 /** 2315 * Resets all stats of the given vsi 2316 **/ 2317 void 2318 ixl_vsi_reset_stats(struct ixl_vsi *vsi) 2319 { 2320 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); 2321 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); 2322 vsi->stat_offsets_loaded = false; 2323 } 2324 2325 /** 2326 * Helper function for reading and updating 48/64 bit stats from the hw 2327 * 2328 * Since the device stats are not reset at PFReset, they likely will not 2329 * be zeroed when the driver starts. We'll save the first values read 2330 * and use them as offsets to be subtracted from the raw values in order 2331 * to report stats that count from zero. 2332 **/ 2333 static void 2334 _ixl_stat_update_helper(struct i40e_hw *hw, u32 reg, 2335 bool offset_loaded, u64 mask, u64 *offset, u64 *stat) 2336 { 2337 u64 new_data = rd64(hw, reg); 2338 2339 if (!offset_loaded) 2340 *offset = new_data; 2341 if (new_data >= *offset) 2342 *stat = new_data - *offset; 2343 else 2344 *stat = (new_data + mask) - *offset + 1; 2345 *stat &= mask; 2346 } 2347 2348 /** 2349 * Read and update a 48 bit stat from the hw 2350 **/ 2351 void 2352 ixl_stat_update48(struct i40e_hw *hw, u32 reg, 2353 bool offset_loaded, u64 *offset, u64 *stat) 2354 { 2355 _ixl_stat_update_helper(hw, 2356 reg, 2357 offset_loaded, 2358 0xFFFFFFFFFFFFULL, 2359 offset, 2360 stat); 2361 } 2362 2363 /** 2364 * ixl_stat_update64 - read and update a 64 bit stat from the chip. 
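 * Passes the full 64-bit mask to _ixl_stat_update_helper, so rollover
 * is handled the same way as for the 48-bit counters.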
2365 **/ 2366 void 2367 ixl_stat_update64(struct i40e_hw *hw, u32 reg, 2368 bool offset_loaded, u64 *offset, u64 *stat) 2369 { 2370 _ixl_stat_update_helper(hw, 2371 reg, 2372 offset_loaded, 2373 0xFFFFFFFFFFFFFFFFULL, 2374 offset, 2375 stat); 2376 } 2377 2378 /** 2379 * Read and update a 32 bit stat from the hw 2380 **/ 2381 void 2382 ixl_stat_update32(struct i40e_hw *hw, u32 reg, 2383 bool offset_loaded, u64 *offset, u64 *stat) 2384 { 2385 u32 new_data; 2386 2387 new_data = rd32(hw, reg); 2388 if (!offset_loaded) 2389 *offset = new_data; 2390 if (new_data >= *offset) 2391 *stat = (u32)(new_data - *offset); 2392 else 2393 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); 2394 } 2395 2396 /** 2397 * Add subset of device sysctls safe to use in recovery mode 2398 */ 2399 void 2400 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf) 2401 { 2402 device_t dev = pf->dev; 2403 2404 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2405 struct sysctl_oid_list *ctx_list = 2406 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2407 2408 struct sysctl_oid *debug_node; 2409 struct sysctl_oid_list *debug_list; 2410 2411 SYSCTL_ADD_PROC(ctx, ctx_list, 2412 OID_AUTO, "fw_version", 2413 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2414 ixl_sysctl_show_fw, "A", "Firmware version"); 2415 2416 /* Add sysctls meant to print debug information, but don't list them 2417 * in "sysctl -a" output. */ 2418 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2419 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2420 "Debug Sysctls"); 2421 debug_list = SYSCTL_CHILDREN(debug_node); 2422 2423 SYSCTL_ADD_UINT(ctx, debug_list, 2424 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2425 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2426 2427 SYSCTL_ADD_UINT(ctx, debug_list, 2428 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2429 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2430 2431 SYSCTL_ADD_PROC(ctx, debug_list, 2432 OID_AUTO, "dump_debug_data", 2433 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2434 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2435 2436 SYSCTL_ADD_PROC(ctx, debug_list, 2437 OID_AUTO, "do_pf_reset", 2438 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2439 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2440 2441 SYSCTL_ADD_PROC(ctx, debug_list, 2442 OID_AUTO, "do_core_reset", 2443 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2444 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2445 2446 SYSCTL_ADD_PROC(ctx, debug_list, 2447 OID_AUTO, "do_global_reset", 2448 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2449 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2450 2451 SYSCTL_ADD_PROC(ctx, debug_list, 2452 OID_AUTO, "queue_interrupt_table", 2453 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2454 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2455 2456 SYSCTL_ADD_PROC(ctx, debug_list, 2457 OID_AUTO, "queue_int_ctln", 2458 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2459 pf, 0, ixl_sysctl_debug_queue_int_ctln, "A", 2460 "View MSI-X control registers for RX queues"); 2461 } 2462 2463 void 2464 ixl_add_device_sysctls(struct ixl_pf *pf) 2465 { 2466 device_t dev = pf->dev; 2467 struct i40e_hw *hw = &pf->hw; 2468 2469 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2470 struct sysctl_oid_list *ctx_list = 2471 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2472 2473 struct sysctl_oid *debug_node; 2474 struct 
sysctl_oid_list *debug_list; 2475 2476 struct sysctl_oid *fec_node; 2477 struct sysctl_oid_list *fec_list; 2478 struct sysctl_oid *eee_node; 2479 struct sysctl_oid_list *eee_list; 2480 2481 /* Set up sysctls */ 2482 SYSCTL_ADD_PROC(ctx, ctx_list, 2483 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2484 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 2485 2486 SYSCTL_ADD_PROC(ctx, ctx_list, 2487 OID_AUTO, "advertise_speed", 2488 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2489 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); 2490 2491 SYSCTL_ADD_PROC(ctx, ctx_list, 2492 OID_AUTO, "supported_speeds", 2493 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2494 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); 2495 2496 SYSCTL_ADD_PROC(ctx, ctx_list, 2497 OID_AUTO, "current_speed", 2498 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2499 ixl_sysctl_current_speed, "A", "Current Port Speed"); 2500 2501 SYSCTL_ADD_PROC(ctx, ctx_list, 2502 OID_AUTO, "fw_version", 2503 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2504 ixl_sysctl_show_fw, "A", "Firmware version"); 2505 2506 SYSCTL_ADD_PROC(ctx, ctx_list, 2507 OID_AUTO, "unallocated_queues", 2508 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2509 ixl_sysctl_unallocated_queues, "I", 2510 "Queues not allocated to a PF or VF"); 2511 2512 SYSCTL_ADD_PROC(ctx, ctx_list, 2513 OID_AUTO, "tx_itr", 2514 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2515 ixl_sysctl_pf_tx_itr, "I", 2516 "Immediately set TX ITR value for all queues"); 2517 2518 SYSCTL_ADD_PROC(ctx, ctx_list, 2519 OID_AUTO, "rx_itr", 2520 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2521 ixl_sysctl_pf_rx_itr, "I", 2522 "Immediately set RX ITR value for all queues"); 2523 2524 SYSCTL_ADD_INT(ctx, ctx_list, 2525 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 2526 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 2527 2528 SYSCTL_ADD_INT(ctx, ctx_list, 2529 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 2530 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 2531 2532 /* Add FEC sysctls for 25G adapters */ 2533 if (i40e_is_25G_device(hw->device_id)) { 2534 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2535 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2536 "FEC Sysctls"); 2537 fec_list = SYSCTL_CHILDREN(fec_node); 2538 2539 SYSCTL_ADD_PROC(ctx, fec_list, 2540 OID_AUTO, "fc_ability", 2541 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2542 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 2543 2544 SYSCTL_ADD_PROC(ctx, fec_list, 2545 OID_AUTO, "rs_ability", 2546 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2547 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 2548 2549 SYSCTL_ADD_PROC(ctx, fec_list, 2550 OID_AUTO, "fc_requested", 2551 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2552 ixl_sysctl_fec_fc_request, "I", 2553 "FC FEC mode requested on link"); 2554 2555 SYSCTL_ADD_PROC(ctx, fec_list, 2556 OID_AUTO, "rs_requested", 2557 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2558 ixl_sysctl_fec_rs_request, "I", 2559 "RS FEC mode requested on link"); 2560 2561 SYSCTL_ADD_PROC(ctx, fec_list, 2562 OID_AUTO, "auto_fec_enabled", 2563 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2564 ixl_sysctl_fec_auto_enable, "I", 2565 "Let FW decide FEC ability/request modes"); 2566 } 2567 2568 SYSCTL_ADD_PROC(ctx, ctx_list, 2569 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2570 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 2571 2572 
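	/* Add Energy Efficient Ethernet (EEE) sysctls */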
eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2573 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2574 "Energy Efficient Ethernet (EEE) Sysctls"); 2575 eee_list = SYSCTL_CHILDREN(eee_node); 2576 2577 SYSCTL_ADD_PROC(ctx, eee_list, 2578 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2579 pf, 0, ixl_sysctl_eee_enable, "I", 2580 "Enable Energy Efficient Ethernet (EEE)"); 2581 2582 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", 2583 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0, 2584 "TX LPI status"); 2585 2586 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", 2587 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0, 2588 "RX LPI status"); 2589 2590 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", 2591 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count, 2592 "TX LPI count"); 2593 2594 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", 2595 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count, 2596 "RX LPI count"); 2597 2598 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, 2599 "link_active_on_if_down", 2600 CTLTYPE_INT | CTLFLAG_RWTUN, 2601 pf, 0, ixl_sysctl_set_link_active, "I", 2602 IXL_SYSCTL_HELP_SET_LINK_ACTIVE); 2603 2604 /* Add sysctls meant to print debug information, but don't list them 2605 * in "sysctl -a" output. */ 2606 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2607 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2608 "Debug Sysctls"); 2609 debug_list = SYSCTL_CHILDREN(debug_node); 2610 2611 SYSCTL_ADD_UINT(ctx, debug_list, 2612 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2613 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2614 2615 SYSCTL_ADD_UINT(ctx, debug_list, 2616 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2617 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2618 2619 SYSCTL_ADD_PROC(ctx, debug_list, 2620 OID_AUTO, "link_status", 2621 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2622 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); 2623 2624 SYSCTL_ADD_PROC(ctx, debug_list, 2625 OID_AUTO, "phy_abilities_init", 2626 CTLTYPE_STRING | CTLFLAG_RD, 2627 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities"); 2628 2629 SYSCTL_ADD_PROC(ctx, debug_list, 2630 OID_AUTO, "phy_abilities", 2631 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2632 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); 2633 2634 SYSCTL_ADD_PROC(ctx, debug_list, 2635 OID_AUTO, "filter_list", 2636 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2637 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); 2638 2639 SYSCTL_ADD_PROC(ctx, debug_list, 2640 OID_AUTO, "hw_res_alloc", 2641 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2642 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); 2643 2644 SYSCTL_ADD_PROC(ctx, debug_list, 2645 OID_AUTO, "switch_config", 2646 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2647 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); 2648 2649 SYSCTL_ADD_PROC(ctx, debug_list, 2650 OID_AUTO, "switch_vlans", 2651 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2652 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration"); 2653 2654 SYSCTL_ADD_PROC(ctx, debug_list, 2655 OID_AUTO, "rss_key", 2656 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2657 pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); 2658 2659 SYSCTL_ADD_PROC(ctx, debug_list, 2660 OID_AUTO, "rss_lut", 2661 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2662 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); 2663 2664 SYSCTL_ADD_PROC(ctx, 
debug_list, 2665 OID_AUTO, "rss_hena", 2666 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2667 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); 2668 2669 SYSCTL_ADD_PROC(ctx, debug_list, 2670 OID_AUTO, "disable_fw_link_management", 2671 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2672 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); 2673 2674 SYSCTL_ADD_PROC(ctx, debug_list, 2675 OID_AUTO, "dump_debug_data", 2676 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2677 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2678 2679 SYSCTL_ADD_PROC(ctx, debug_list, 2680 OID_AUTO, "do_pf_reset", 2681 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2682 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2683 2684 SYSCTL_ADD_PROC(ctx, debug_list, 2685 OID_AUTO, "do_core_reset", 2686 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2687 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2688 2689 SYSCTL_ADD_PROC(ctx, debug_list, 2690 OID_AUTO, "do_global_reset", 2691 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2692 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2693 2694 SYSCTL_ADD_PROC(ctx, debug_list, 2695 OID_AUTO, "queue_interrupt_table", 2696 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2697 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2698 2699 SYSCTL_ADD_PROC(ctx, debug_list, 2700 OID_AUTO, "phy_statistics", CTLTYPE_STRING | CTLFLAG_RD, 2701 pf, 0, ixl_sysctl_phy_statistics, "A", "PHY Statistics"); 2702 2703 if (pf->has_i2c) { 2704 SYSCTL_ADD_PROC(ctx, debug_list, 2705 OID_AUTO, "read_i2c_byte", 2706 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2707 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); 2708 2709 SYSCTL_ADD_PROC(ctx, debug_list, 2710 OID_AUTO, "write_i2c_byte", 2711 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2712 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); 2713 2714 SYSCTL_ADD_PROC(ctx, debug_list, 2715 OID_AUTO, "read_i2c_diag_data", 2716 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2717 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); 2718 } 2719 } 2720 2721 /* 2722 * Primarily for finding out how many queues can be assigned to VFs, 2723 * at runtime. 
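 * The count is read from the driver's PF queue manager (pf->qmgr),
 * not queried from firmware.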
2724 */ 2725 static int 2726 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) 2727 { 2728 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2729 int queues; 2730 2731 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); 2732 2733 return sysctl_handle_int(oidp, NULL, queues, req); 2734 } 2735 2736 static const char * 2737 ixl_link_speed_string(enum i40e_aq_link_speed link_speed) 2738 { 2739 const char * link_speed_str[] = { 2740 "Unknown", 2741 "100 Mbps", 2742 "1 Gbps", 2743 "10 Gbps", 2744 "40 Gbps", 2745 "20 Gbps", 2746 "25 Gbps", 2747 "2.5 Gbps", 2748 "5 Gbps" 2749 }; 2750 int index; 2751 2752 switch (link_speed) { 2753 case I40E_LINK_SPEED_100MB: 2754 index = 1; 2755 break; 2756 case I40E_LINK_SPEED_1GB: 2757 index = 2; 2758 break; 2759 case I40E_LINK_SPEED_10GB: 2760 index = 3; 2761 break; 2762 case I40E_LINK_SPEED_40GB: 2763 index = 4; 2764 break; 2765 case I40E_LINK_SPEED_20GB: 2766 index = 5; 2767 break; 2768 case I40E_LINK_SPEED_25GB: 2769 index = 6; 2770 break; 2771 case I40E_LINK_SPEED_2_5GB: 2772 index = 7; 2773 break; 2774 case I40E_LINK_SPEED_5GB: 2775 index = 8; 2776 break; 2777 case I40E_LINK_SPEED_UNKNOWN: 2778 default: 2779 index = 0; 2780 break; 2781 } 2782 2783 return (link_speed_str[index]); 2784 } 2785 2786 int 2787 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2788 { 2789 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2790 struct i40e_hw *hw = &pf->hw; 2791 int error = 0; 2792 2793 ixl_update_link_status(pf); 2794 2795 error = sysctl_handle_string(oidp, 2796 __DECONST(void *, 2797 ixl_link_speed_string(hw->phy.link_info.link_speed)), 2798 8, req); 2799 2800 return (error); 2801 } 2802 2803 /* 2804 * Converts 8-bit speeds value to and from sysctl flags and 2805 * Admin Queue flags. 2806 */ 2807 static u8 2808 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) 2809 { 2810 #define SPEED_MAP_SIZE 8 2811 static u16 speedmap[SPEED_MAP_SIZE] = { 2812 (I40E_LINK_SPEED_100MB | (0x1 << 8)), 2813 (I40E_LINK_SPEED_1GB | (0x2 << 8)), 2814 (I40E_LINK_SPEED_10GB | (0x4 << 8)), 2815 (I40E_LINK_SPEED_20GB | (0x8 << 8)), 2816 (I40E_LINK_SPEED_25GB | (0x10 << 8)), 2817 (I40E_LINK_SPEED_40GB | (0x20 << 8)), 2818 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)), 2819 (I40E_LINK_SPEED_5GB | (0x80 << 8)), 2820 }; 2821 u8 retval = 0; 2822 2823 for (int i = 0; i < SPEED_MAP_SIZE; i++) { 2824 if (to_aq) 2825 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; 2826 else 2827 retval |= (speeds & speedmap[i]) ? 
(speedmap[i] >> 8) : 0; 2828 } 2829 2830 return (retval); 2831 } 2832 2833 int 2834 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) 2835 { 2836 struct i40e_hw *hw = &pf->hw; 2837 device_t dev = pf->dev; 2838 struct i40e_aq_get_phy_abilities_resp abilities; 2839 struct i40e_aq_set_phy_config config; 2840 enum i40e_status_code aq_error = 0; 2841 2842 /* Get current capability information */ 2843 aq_error = i40e_aq_get_phy_capabilities(hw, 2844 FALSE, FALSE, &abilities, NULL); 2845 if (aq_error) { 2846 device_printf(dev, 2847 "%s: Error getting phy capabilities %d," 2848 " aq error: %d\n", __func__, aq_error, 2849 hw->aq.asq_last_status); 2850 return (EIO); 2851 } 2852 2853 /* Prepare new config */ 2854 bzero(&config, sizeof(config)); 2855 if (from_aq) 2856 config.link_speed = speeds; 2857 else 2858 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); 2859 config.phy_type = abilities.phy_type; 2860 config.phy_type_ext = abilities.phy_type_ext; 2861 config.abilities = abilities.abilities 2862 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 2863 config.eee_capability = abilities.eee_capability; 2864 config.eeer = abilities.eeer_val; 2865 config.low_power_ctrl = abilities.d3_lpan; 2866 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 2867 & I40E_AQ_PHY_FEC_CONFIG_MASK; 2868 2869 /* Do aq command & restart link */ 2870 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 2871 if (aq_error) { 2872 device_printf(dev, 2873 "%s: Error setting new phy config %d," 2874 " aq error: %d\n", __func__, aq_error, 2875 hw->aq.asq_last_status); 2876 return (EIO); 2877 } 2878 2879 return (0); 2880 } 2881 2882 /* 2883 ** Supported link speeds 2884 ** Flags: 2885 ** 0x1 - 100 Mb 2886 ** 0x2 - 1G 2887 ** 0x4 - 10G 2888 ** 0x8 - 20G 2889 ** 0x10 - 25G 2890 ** 0x20 - 40G 2891 ** 0x40 - 2.5G 2892 ** 0x80 - 5G 2893 */ 2894 static int 2895 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) 2896 { 2897 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2898 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 2899 2900 return sysctl_handle_int(oidp, NULL, supported, req); 2901 } 2902 2903 /* 2904 ** Control link advertise speed: 2905 ** Flags: 2906 ** 0x1 - advertise 100 Mb 2907 ** 0x2 - advertise 1G 2908 ** 0x4 - advertise 10G 2909 ** 0x8 - advertise 20G 2910 ** 0x10 - advertise 25G 2911 ** 0x20 - advertise 40G 2912 ** 0x40 - advertise 2.5G 2913 ** 0x80 - advertise 5G 2914 ** 2915 ** Set to 0 to disable link 2916 */ 2917 int 2918 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) 2919 { 2920 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2921 device_t dev = pf->dev; 2922 u8 converted_speeds; 2923 int requested_ls = 0; 2924 int error = 0; 2925 2926 /* Read in new mode */ 2927 requested_ls = pf->advertised_speed; 2928 error = sysctl_handle_int(oidp, &requested_ls, 0, req); 2929 if ((error) || (req->newptr == NULL)) 2930 return (error); 2931 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 2932 device_printf(dev, "Interface is currently in FW recovery mode. 
" 2933 "Setting advertise speed not supported\n"); 2934 return (EINVAL); 2935 } 2936 2937 /* Error out if bits outside of possible flag range are set */ 2938 if ((requested_ls & ~((u8)0xFF)) != 0) { 2939 device_printf(dev, "Input advertised speed out of range; " 2940 "valid flags are: 0x%02x\n", 2941 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2942 return (EINVAL); 2943 } 2944 2945 /* Check if adapter supports input value */ 2946 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); 2947 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { 2948 device_printf(dev, "Invalid advertised speed; " 2949 "valid flags are: 0x%02x\n", 2950 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2951 return (EINVAL); 2952 } 2953 2954 error = ixl_set_advertised_speeds(pf, requested_ls, false); 2955 if (error) 2956 return (error); 2957 2958 pf->advertised_speed = requested_ls; 2959 ixl_update_link_status(pf); 2960 return (0); 2961 } 2962 2963 /* 2964 * Input: bitmap of enum i40e_aq_link_speed 2965 */ 2966 u64 2967 ixl_max_aq_speed_to_value(u8 link_speeds) 2968 { 2969 if (link_speeds & I40E_LINK_SPEED_40GB) 2970 return IF_Gbps(40); 2971 if (link_speeds & I40E_LINK_SPEED_25GB) 2972 return IF_Gbps(25); 2973 if (link_speeds & I40E_LINK_SPEED_20GB) 2974 return IF_Gbps(20); 2975 if (link_speeds & I40E_LINK_SPEED_10GB) 2976 return IF_Gbps(10); 2977 if (link_speeds & I40E_LINK_SPEED_5GB) 2978 return IF_Gbps(5); 2979 if (link_speeds & I40E_LINK_SPEED_2_5GB) 2980 return IF_Mbps(2500); 2981 if (link_speeds & I40E_LINK_SPEED_1GB) 2982 return IF_Gbps(1); 2983 if (link_speeds & I40E_LINK_SPEED_100MB) 2984 return IF_Mbps(100); 2985 else 2986 /* Minimum supported link speed */ 2987 return IF_Mbps(100); 2988 } 2989 2990 /* 2991 ** Get the width and transaction speed of 2992 ** the bus this adapter is plugged into. 2993 */ 2994 void 2995 ixl_get_bus_info(struct ixl_pf *pf) 2996 { 2997 struct i40e_hw *hw = &pf->hw; 2998 device_t dev = pf->dev; 2999 u16 link; 3000 u32 offset, num_ports; 3001 u64 max_speed; 3002 3003 /* Some devices don't use PCIE */ 3004 if (hw->mac.type == I40E_MAC_X722) 3005 return; 3006 3007 /* Read PCI Express Capabilities Link Status Register */ 3008 pci_find_cap(dev, PCIY_EXPRESS, &offset); 3009 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 3010 3011 /* Fill out hw struct with PCIE info */ 3012 i40e_set_pci_config_data(hw, link); 3013 3014 /* Use info to print out bandwidth messages */ 3015 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 3016 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 3017 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 3018 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 3019 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 3020 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 3021 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 3022 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 3023 ("Unknown")); 3024 3025 /* 3026 * If adapter is in slot with maximum supported speed, 3027 * no warning message needs to be printed out. 
3028 */ 3029 if (hw->bus.speed >= i40e_bus_speed_8000 3030 && hw->bus.width >= i40e_bus_width_pcie_x8) 3031 return; 3032 3033 num_ports = bitcount32(hw->func_caps.valid_functions); 3034 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; 3035 3036 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { 3037 device_printf(dev, "PCI-Express bandwidth available" 3038 " for this device may be insufficient for" 3039 " optimal performance.\n"); 3040 device_printf(dev, "Please move the device to a different" 3041 " PCI-e link with more lanes and/or higher" 3042 " transfer rate.\n"); 3043 } 3044 } 3045 3046 static int 3047 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 3048 { 3049 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3050 struct i40e_hw *hw = &pf->hw; 3051 struct sbuf *sbuf; 3052 3053 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3054 ixl_nvm_version_str(hw, sbuf); 3055 sbuf_finish(sbuf); 3056 sbuf_delete(sbuf); 3057 3058 return (0); 3059 } 3060 3061 void 3062 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) 3063 { 3064 u8 nvma_ptr = nvma->config & 0xFF; 3065 u8 nvma_flags = (nvma->config & 0xF00) >> 8; 3066 const char * cmd_str; 3067 3068 switch (nvma->command) { 3069 case I40E_NVM_READ: 3070 if (nvma_ptr == 0xF && nvma_flags == 0xF && 3071 nvma->offset == 0 && nvma->data_size == 1) { 3072 device_printf(dev, "NVMUPD: Get Driver Status Command\n"); 3073 return; 3074 } 3075 cmd_str = "READ "; 3076 break; 3077 case I40E_NVM_WRITE: 3078 cmd_str = "WRITE"; 3079 break; 3080 default: 3081 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command); 3082 return; 3083 } 3084 device_printf(dev, 3085 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n", 3086 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size); 3087 } 3088 3089 int 3090 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) 3091 { 3092 struct i40e_hw *hw = &pf->hw; 3093 struct i40e_nvm_access *nvma; 3094 device_t dev = pf->dev; 3095 enum i40e_status_code status = 0; 3096 size_t nvma_size, ifd_len, exp_len; 3097 int err, perrno; 3098 3099 DEBUGFUNC("ixl_handle_nvmupd_cmd"); 3100 3101 /* Sanity checks */ 3102 nvma_size = sizeof(struct i40e_nvm_access); 3103 ifd_len = ifd->ifd_len; 3104 3105 if (ifd_len < nvma_size || 3106 ifd->ifd_data == NULL) { 3107 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", 3108 __func__); 3109 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", 3110 __func__, ifd_len, nvma_size); 3111 device_printf(dev, "%s: data pointer: %p\n", __func__, 3112 ifd->ifd_data); 3113 return (EINVAL); 3114 } 3115 3116 nvma = malloc(ifd_len, M_IXL, M_WAITOK); 3117 err = copyin(ifd->ifd_data, nvma, ifd_len); 3118 if (err) { 3119 device_printf(dev, "%s: Cannot get request from user space\n", 3120 __func__); 3121 free(nvma, M_IXL); 3122 return (err); 3123 } 3124 3125 if (pf->dbg_mask & IXL_DBG_NVMUPD) 3126 ixl_print_nvm_cmd(dev, nvma); 3127 3128 if (IXL_PF_IS_RESETTING(pf)) { 3129 int count = 0; 3130 while (count++ < 100) { 3131 i40e_msec_delay(100); 3132 if (!(IXL_PF_IS_RESETTING(pf))) 3133 break; 3134 } 3135 } 3136 3137 if (IXL_PF_IS_RESETTING(pf)) { 3138 device_printf(dev, 3139 "%s: timeout waiting for EMP reset to finish\n", 3140 __func__); 3141 free(nvma, M_IXL); 3142 return (-EBUSY); 3143 } 3144 3145 if (nvma->data_size < 1 || nvma->data_size > 4096) { 3146 device_printf(dev, 3147 "%s: invalid request, data size not in supported range\n", 3148 __func__); 3149 free(nvma, M_IXL); 3150 return 
(EINVAL); 3151 } 3152 3153 /* 3154 * Older versions of the NVM update tool don't set ifd_len to the size 3155 * of the entire buffer passed to the ioctl. Check the data_size field 3156 * in the contained i40e_nvm_access struct and ensure everything is 3157 * copied in from userspace. 3158 */ 3159 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ 3160 3161 if (ifd_len < exp_len) { 3162 ifd_len = exp_len; 3163 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK); 3164 err = copyin(ifd->ifd_data, nvma, ifd_len); 3165 if (err) { 3166 device_printf(dev, "%s: Cannot get request from user space\n", 3167 __func__); 3168 free(nvma, M_IXL); 3169 return (err); 3170 } 3171 } 3172 3173 // TODO: Might need a different lock here 3174 // IXL_PF_LOCK(pf); 3175 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); 3176 // IXL_PF_UNLOCK(pf); 3177 3178 err = copyout(nvma, ifd->ifd_data, ifd_len); 3179 free(nvma, M_IXL); 3180 if (err) { 3181 device_printf(dev, "%s: Cannot return data to user space\n", 3182 __func__); 3183 return (err); 3184 } 3185 3186 /* Let the nvmupdate report errors, show them only when debug is enabled */ 3187 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) 3188 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", 3189 i40e_stat_str(hw, status), perrno); 3190 3191 /* 3192 * -EPERM is actually ERESTART, which the kernel interprets as it needing 3193 * to run this ioctl again. So use -EACCES for -EPERM instead. 3194 */ 3195 if (perrno == -EPERM) 3196 return (-EACCES); 3197 else 3198 return (perrno); 3199 } 3200 3201 int 3202 ixl_find_i2c_interface(struct ixl_pf *pf) 3203 { 3204 struct i40e_hw *hw = &pf->hw; 3205 bool i2c_en, port_matched; 3206 u32 reg; 3207 3208 for (int i = 0; i < 4; i++) { 3209 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); 3210 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); 3211 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) 3212 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) 3213 & BIT(hw->port); 3214 if (i2c_en && port_matched) 3215 return (i); 3216 } 3217 3218 return (-1); 3219 } 3220 3221 void 3222 ixl_set_link(struct ixl_pf *pf, bool enable) 3223 { 3224 struct i40e_hw *hw = &pf->hw; 3225 device_t dev = pf->dev; 3226 struct i40e_aq_get_phy_abilities_resp abilities; 3227 struct i40e_aq_set_phy_config config; 3228 enum i40e_status_code aq_error = 0; 3229 u32 phy_type, phy_type_ext; 3230 3231 /* Get initial capability information */ 3232 aq_error = i40e_aq_get_phy_capabilities(hw, 3233 FALSE, TRUE, &abilities, NULL); 3234 if (aq_error) { 3235 device_printf(dev, 3236 "%s: Error getting phy capabilities %d," 3237 " aq error: %d\n", __func__, aq_error, 3238 hw->aq.asq_last_status); 3239 return; 3240 } 3241 3242 phy_type = abilities.phy_type; 3243 phy_type_ext = abilities.phy_type_ext; 3244 3245 /* Get current capability information */ 3246 aq_error = i40e_aq_get_phy_capabilities(hw, 3247 FALSE, FALSE, &abilities, NULL); 3248 if (aq_error) { 3249 device_printf(dev, 3250 "%s: Error getting phy capabilities %d," 3251 " aq error: %d\n", __func__, aq_error, 3252 hw->aq.asq_last_status); 3253 return; 3254 } 3255 3256 /* Prepare new config */ 3257 memset(&config, 0, sizeof(config)); 3258 config.link_speed = abilities.link_speed; 3259 config.abilities = abilities.abilities; 3260 config.eee_capability = abilities.eee_capability; 3261 config.eeer = abilities.eeer_val; 3262 config.low_power_ctrl = abilities.d3_lpan; 3263 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 3264 & 
I40E_AQ_PHY_FEC_CONFIG_MASK;
3265 	config.phy_type = 0;
3266 	config.phy_type_ext = 0;
3267 
3268 	config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3269 	    I40E_AQ_PHY_FLAG_PAUSE_RX);
3270 
3271 	switch (pf->fc) {
3272 	case I40E_FC_FULL:
3273 		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3274 		    I40E_AQ_PHY_FLAG_PAUSE_RX;
3275 		break;
3276 	case I40E_FC_RX_PAUSE:
3277 		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3278 		break;
3279 	case I40E_FC_TX_PAUSE:
3280 		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3281 		break;
3282 	default:
3283 		break;
3284 	}
3285 
3286 	if (enable) {
3287 		config.phy_type = phy_type;
3288 		config.phy_type_ext = phy_type_ext;
3289 
3290 	}
3291 
3292 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3293 	if (aq_error) {
3294 		device_printf(dev,
3295 		    "%s: Error setting new phy config %d,"
3296 		    " aq error: %d\n", __func__, aq_error,
3297 		    hw->aq.asq_last_status);
3298 		return;
3299 	}
3300 
3301 	aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3302 	if (aq_error) {
3303 		device_printf(dev,
3304 		    "%s: Error setting link config %d,"
3305 		    " aq error: %d\n", __func__, aq_error,
3306 		    hw->aq.asq_last_status);
3307 		return;
3308 	}
3309 }
3310 
3311 static char *
3312 ixl_phy_type_string(u32 bit_pos, bool ext)
3313 {
3314 	static char * phy_types_str[32] = {
3315 		"SGMII",
3316 		"1000BASE-KX",
3317 		"10GBASE-KX4",
3318 		"10GBASE-KR",
3319 		"40GBASE-KR4",
3320 		"XAUI",
3321 		"XFI",
3322 		"SFI",
3323 		"XLAUI",
3324 		"XLPPI",
3325 		"40GBASE-CR4",
3326 		"10GBASE-CR1",
3327 		"SFP+ Active DA",
3328 		"QSFP+ Active DA",
3329 		"Reserved (14)",
3330 		"Reserved (15)",
3331 		"Reserved (16)",
3332 		"100BASE-TX",
3333 		"1000BASE-T",
3334 		"10GBASE-T",
3335 		"10GBASE-SR",
3336 		"10GBASE-LR",
3337 		"10GBASE-SFP+Cu",
3338 		"10GBASE-CR1",
3339 		"40GBASE-CR4",
3340 		"40GBASE-SR4",
3341 		"40GBASE-LR4",
3342 		"1000BASE-SX",
3343 		"1000BASE-LX",
3344 		"1000BASE-T Optical",
3345 		"20GBASE-KR2",
3346 		"Reserved (31)"
3347 	};
3348 	static char * ext_phy_types_str[8] = {
3349 		"25GBASE-KR",
3350 		"25GBASE-CR",
3351 		"25GBASE-SR",
3352 		"25GBASE-LR",
3353 		"25GBASE-AOC",
3354 		"25GBASE-ACC",
3355 		"2.5GBASE-T",
3356 		"5GBASE-T"
3357 	};
3358 
3359 	if (ext && bit_pos > 7) return "Invalid_Ext";
3360 	if (bit_pos > 31) return "Invalid";
3361 
3362 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3363 }
3364 
3365 /* TODO: ERJ: I don't think this is necessary anymore.
 */
3366 int
3367 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3368 {
3369 	device_t dev = pf->dev;
3370 	struct i40e_hw *hw = &pf->hw;
3371 	struct i40e_aq_desc desc;
3372 	enum i40e_status_code status;
3373 
3374 	struct i40e_aqc_get_link_status *aq_link_status =
3375 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
3376 
3377 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3378 	aq_link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); /* set in the descriptor that is sent, not the out-buffer */
3379 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3380 	if (status) {
3381 		device_printf(dev,
3382 		    "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3383 		    __func__, i40e_stat_str(hw, status),
3384 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3385 		return (EIO);
3386 	}
3387 
3388 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3389 	return (0);
3390 }
3391 
3392 static char *
3393 ixl_phy_type_string_ls(u8 val)
3394 {
3395 	if (val >= 0x1F)
3396 		return ixl_phy_type_string(val - 0x1F, true);
3397 	else
3398 		return ixl_phy_type_string(val, false);
3399 }
3400 
3401 static int
3402 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3403 {
3404 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3405 	device_t dev = pf->dev;
3406 	struct sbuf *buf;
3407 	int error = 0;
3408 
3409 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3410 	if (!buf) {
3411 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3412 		return (ENOMEM);
3413 	}
3414 
3415 	struct i40e_aqc_get_link_status link_status;
3416 	error = ixl_aq_get_link_status(pf, &link_status);
3417 	if (error) {
3418 		sbuf_delete(buf);
3419 		return (error);
3420 	}
3421 
3422 	sbuf_printf(buf, "\n"
3423 	    "PHY Type : 0x%02x<%s>\n"
3424 	    "Speed : 0x%02x\n"
3425 	    "Link info: 0x%02x\n"
3426 	    "AN info : 0x%02x\n"
3427 	    "Ext info : 0x%02x\n"
3428 	    "Loopback : 0x%02x\n"
3429 	    "Max Frame: %d\n"
3430 	    "Config : 0x%02x\n"
3431 	    "Power : 0x%02x",
3432 	    link_status.phy_type,
3433 	    ixl_phy_type_string_ls(link_status.phy_type),
3434 	    link_status.link_speed,
3435 	    link_status.link_info,
3436 	    link_status.an_info,
3437 	    link_status.ext_info,
3438 	    link_status.loopback,
3439 	    link_status.max_frame_size,
3440 	    link_status.config,
3441 	    link_status.power_desc);
3442 
3443 	error = sbuf_finish(buf);
3444 	if (error)
3445 		device_printf(dev, "Error finishing sbuf: %d\n", error);
3446 
3447 	sbuf_delete(buf);
3448 	return (error);
3449 }
3450 
3451 static int
3452 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3453 {
3454 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3455 	struct i40e_hw *hw = &pf->hw;
3456 	device_t dev = pf->dev;
3457 	enum i40e_status_code status;
3458 	struct i40e_aq_get_phy_abilities_resp abilities;
3459 	struct sbuf *buf;
3460 	int error = 0;
3461 
3462 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3463 	if (!buf) {
3464 		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3465 		return (ENOMEM);
3466 	}
3467 
3468 	status = i40e_aq_get_phy_capabilities(hw,
3469 	    FALSE, arg2 != 0, &abilities, NULL);
3470 	if (status) {
3471 		device_printf(dev,
3472 		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3473 		    __func__, i40e_stat_str(hw, status),
3474 		    i40e_aq_str(hw, hw->aq.asq_last_status));
3475 		sbuf_delete(buf);
3476 		return (EIO);
3477 	}
3478 
3479 	sbuf_printf(buf, "\n"
3480 	    "PHY Type : %08x",
3481 	    abilities.phy_type);
3482 
3483 	if (abilities.phy_type != 0) {
3484 		sbuf_printf(buf, "<");
3485 		for (int i = 0; i < 32; i++)
3486 			if ((1 << i) & abilities.phy_type)
3487 				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3488 		sbuf_printf(buf,
">"); 3489 } 3490 3491 sbuf_printf(buf, "\nPHY Ext : %02x", 3492 abilities.phy_type_ext); 3493 3494 if (abilities.phy_type_ext != 0) { 3495 sbuf_printf(buf, "<"); 3496 for (int i = 0; i < 4; i++) 3497 if ((1 << i) & abilities.phy_type_ext) 3498 sbuf_printf(buf, "%s,", 3499 ixl_phy_type_string(i, true)); 3500 sbuf_printf(buf, ">"); 3501 } 3502 3503 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 3504 if (abilities.link_speed != 0) { 3505 u8 link_speed; 3506 sbuf_printf(buf, " <"); 3507 for (int i = 0; i < 8; i++) { 3508 link_speed = (1 << i) & abilities.link_speed; 3509 if (link_speed) 3510 sbuf_printf(buf, "%s, ", 3511 ixl_link_speed_string(link_speed)); 3512 } 3513 sbuf_printf(buf, ">"); 3514 } 3515 3516 sbuf_printf(buf, "\n" 3517 "Abilities: %02x\n" 3518 "EEE cap : %04x\n" 3519 "EEER reg : %08x\n" 3520 "D3 Lpan : %02x\n" 3521 "ID : %02x %02x %02x %02x\n" 3522 "ModType : %02x %02x %02x\n" 3523 "ModType E: %01x\n" 3524 "FEC Cfg : %02x\n" 3525 "Ext CC : %02x", 3526 abilities.abilities, abilities.eee_capability, 3527 abilities.eeer_val, abilities.d3_lpan, 3528 abilities.phy_id[0], abilities.phy_id[1], 3529 abilities.phy_id[2], abilities.phy_id[3], 3530 abilities.module_type[0], abilities.module_type[1], 3531 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 3532 abilities.fec_cfg_curr_mod_ext_info & 0x1F, 3533 abilities.ext_comp_code); 3534 3535 error = sbuf_finish(buf); 3536 if (error) 3537 device_printf(dev, "Error finishing sbuf: %d\n", error); 3538 3539 sbuf_delete(buf); 3540 return (error); 3541 } 3542 3543 static int 3544 ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS) 3545 { 3546 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3547 struct i40e_hw *hw = &pf->hw; 3548 device_t dev = pf->dev; 3549 struct sbuf *buf; 3550 int error = 0; 3551 3552 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3553 if (buf == NULL) { 3554 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3555 return (ENOMEM); 3556 } 3557 3558 if (hw->mac.type == I40E_MAC_X722) { 3559 sbuf_printf(buf, "\n" 3560 "PCS Link Control Register: unavailable\n" 3561 "PCS Link Status 1: unavailable\n" 3562 "PCS Link Status 2: unavailable\n" 3563 "XGMII FIFO Status: unavailable\n" 3564 "Auto-Negotiation (AN) Status: unavailable\n" 3565 "KR PCS Status: unavailable\n" 3566 "KR FEC Status 1 – FEC Correctable Blocks Counter: unavailable\n" 3567 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: unavailable" 3568 ); 3569 } else { 3570 sbuf_printf(buf, "\n" 3571 "PCS Link Control Register: %#010X\n" 3572 "PCS Link Status 1: %#010X\n" 3573 "PCS Link Status 2: %#010X\n" 3574 "XGMII FIFO Status: %#010X\n" 3575 "Auto-Negotiation (AN) Status: %#010X\n" 3576 "KR PCS Status: %#010X\n" 3577 "KR FEC Status 1 – FEC Correctable Blocks Counter: %#010X\n" 3578 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: %#010X", 3579 rd32(hw, I40E_PRTMAC_PCS_LINK_CTRL), 3580 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS1(0)), 3581 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS2), 3582 rd32(hw, I40E_PRTMAC_PCS_XGMII_FIFO_STATUS), 3583 rd32(hw, I40E_PRTMAC_PCS_AN_LP_STATUS), 3584 rd32(hw, I40E_PRTMAC_PCS_KR_STATUS), 3585 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS1), 3586 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS2) 3587 ); 3588 } 3589 3590 error = sbuf_finish(buf); 3591 if (error) 3592 device_printf(dev, "Error finishing sbuf: %d\n", error); 3593 3594 sbuf_delete(buf); 3595 return (error); 3596 } 3597 3598 static int 3599 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 3600 { 3601 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3602 struct 
ixl_vsi *vsi = &pf->vsi; 3603 struct ixl_mac_filter *f; 3604 device_t dev = pf->dev; 3605 int error = 0, ftl_len = 0, ftl_counter = 0; 3606 3607 struct sbuf *buf; 3608 3609 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3610 if (!buf) { 3611 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3612 return (ENOMEM); 3613 } 3614 3615 sbuf_printf(buf, "\n"); 3616 3617 /* Print MAC filters */ 3618 sbuf_printf(buf, "PF Filters:\n"); 3619 LIST_FOREACH(f, &vsi->ftl, ftle) 3620 ftl_len++; 3621 3622 if (ftl_len < 1) 3623 sbuf_printf(buf, "(none)\n"); 3624 else { 3625 LIST_FOREACH(f, &vsi->ftl, ftle) { 3626 sbuf_printf(buf, 3627 MAC_FORMAT ", vlan %4d, flags %#06x", 3628 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3629 /* don't print '\n' for last entry */ 3630 if (++ftl_counter != ftl_len) 3631 sbuf_printf(buf, "\n"); 3632 } 3633 } 3634 3635 #ifdef PCI_IOV 3636 /* TODO: Give each VF its own filter list sysctl */ 3637 struct ixl_vf *vf; 3638 if (pf->num_vfs > 0) { 3639 sbuf_printf(buf, "\n\n"); 3640 for (int i = 0; i < pf->num_vfs; i++) { 3641 vf = &pf->vfs[i]; 3642 if (!(vf->vf_flags & VF_FLAG_ENABLED)) 3643 continue; 3644 3645 vsi = &vf->vsi; 3646 ftl_len = 0, ftl_counter = 0; 3647 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num); 3648 LIST_FOREACH(f, &vsi->ftl, ftle) 3649 ftl_len++; 3650 3651 if (ftl_len < 1) 3652 sbuf_printf(buf, "(none)\n"); 3653 else { 3654 LIST_FOREACH(f, &vsi->ftl, ftle) { 3655 sbuf_printf(buf, 3656 MAC_FORMAT ", vlan %4d, flags %#06x\n", 3657 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3658 } 3659 } 3660 } 3661 } 3662 #endif 3663 3664 error = sbuf_finish(buf); 3665 if (error) 3666 device_printf(dev, "Error finishing sbuf: %d\n", error); 3667 sbuf_delete(buf); 3668 3669 return (error); 3670 } 3671 3672 #define IXL_SW_RES_SIZE 0x14 3673 int 3674 ixl_res_alloc_cmp(const void *a, const void *b) 3675 { 3676 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; 3677 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; 3678 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; 3679 3680 return ((int)one->resource_type - (int)two->resource_type); 3681 } 3682 3683 /* 3684 * Longest string length: 25 3685 */ 3686 const char * 3687 ixl_switch_res_type_string(u8 type) 3688 { 3689 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = { 3690 "VEB", 3691 "VSI", 3692 "Perfect Match MAC address", 3693 "S-tag", 3694 "(Reserved)", 3695 "Multicast hash entry", 3696 "Unicast hash entry", 3697 "VLAN", 3698 "VSI List entry", 3699 "(Reserved)", 3700 "VLAN Statistic Pool", 3701 "Mirror Rule", 3702 "Queue Set", 3703 "Inner VLAN Forward filter", 3704 "(Reserved)", 3705 "Inner MAC", 3706 "IP", 3707 "GRE/VN1 Key", 3708 "VN2 Key", 3709 "Tunneling Port" 3710 }; 3711 3712 if (type < IXL_SW_RES_SIZE) 3713 return ixl_switch_res_type_strings[type]; 3714 else 3715 return "(Reserved)"; 3716 } 3717 3718 static int 3719 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) 3720 { 3721 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3722 struct i40e_hw *hw = &pf->hw; 3723 device_t dev = pf->dev; 3724 struct sbuf *buf; 3725 enum i40e_status_code status; 3726 int error = 0; 3727 3728 u8 num_entries; 3729 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; 3730 3731 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3732 if (!buf) { 3733 device_printf(dev, "Could not allocate sbuf for output.\n"); 3734 return (ENOMEM); 3735 } 3736 3737 bzero(resp, sizeof(resp)); 3738 status = i40e_aq_get_switch_resource_alloc(hw, 
&num_entries,
	    resp,
	    IXL_SW_RES_SIZE,
	    NULL);
	if (status) {
		device_printf(dev,
		    "%s: get_switch_resource_alloc() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		return (EIO);
	}

	/* Sort entries by type for display */
	qsort(resp, num_entries,
	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
	    &ixl_res_alloc_cmp);

	sbuf_cat(buf, "\n");
	sbuf_printf(buf, "# of entries: %d\n", num_entries);
	sbuf_printf(buf,
	    "                     Type | Guaranteed | Total | Used   | Un-allocated\n"
	    "                          | (this)     | (all) | (this) | (all)       \n");
	for (int i = 0; i < num_entries; i++) {
		sbuf_printf(buf,
		    "%25s | %10d   %5d   %6d   %12d",
		    ixl_switch_res_type_string(resp[i].resource_type),
		    resp[i].guaranteed,
		    resp[i].total,
		    resp[i].used,
		    resp[i].total_unalloced);
		if (i < num_entries - 1)
			sbuf_cat(buf, "\n");
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (error);
}

enum ixl_sw_seid_offset {
	IXL_SW_SEID_EMP = 1,
	IXL_SW_SEID_MAC_START = 2,
	IXL_SW_SEID_MAC_END = 5,
	IXL_SW_SEID_PF_START = 16,
	IXL_SW_SEID_PF_END = 31,
	IXL_SW_SEID_VF_START = 32,
	IXL_SW_SEID_VF_END = 159,
};

/*
 * Caller must init and delete sbuf; this function will clear and
 * finish it for caller.
 *
 * Note: The SEID argument only applies for elements defined by FW at
 * power-on; these include the EMP, Ports, PFs and VFs.
 */
static char *
ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
{
	sbuf_clear(s);

	/* If SEID is in certain ranges, then we can infer the
	 * mapping of SEID to switch element.
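	 *
	 * For example, with the offsets defined above, SEID 1 names the
	 * EMP, SEIDs 2-5 name MACs 0-3, SEID 16 names PF 0 and SEID 32
	 * names VF 0.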
	 */
	if (seid == IXL_SW_SEID_EMP) {
		sbuf_cat(s, "EMP");
		goto out;
	} else if (seid >= IXL_SW_SEID_MAC_START &&
	    seid <= IXL_SW_SEID_MAC_END) {
		sbuf_printf(s, "MAC %2d",
		    seid - IXL_SW_SEID_MAC_START);
		goto out;
	} else if (seid >= IXL_SW_SEID_PF_START &&
	    seid <= IXL_SW_SEID_PF_END) {
		sbuf_printf(s, "PF %3d",
		    seid - IXL_SW_SEID_PF_START);
		goto out;
	} else if (seid >= IXL_SW_SEID_VF_START &&
	    seid <= IXL_SW_SEID_VF_END) {
		sbuf_printf(s, "VF %3d",
		    seid - IXL_SW_SEID_VF_START);
		goto out;
	}

	switch (element_type) {
	case I40E_AQ_SW_ELEM_TYPE_BMC:
		sbuf_cat(s, "BMC");
		break;
	case I40E_AQ_SW_ELEM_TYPE_PV:
		sbuf_cat(s, "PV");
		break;
	case I40E_AQ_SW_ELEM_TYPE_VEB:
		sbuf_cat(s, "VEB");
		break;
	case I40E_AQ_SW_ELEM_TYPE_PA:
		sbuf_cat(s, "PA");
		break;
	case I40E_AQ_SW_ELEM_TYPE_VSI:
		sbuf_cat(s, "VSI");
		break;
	default:
		sbuf_cat(s, "?");
		break;
	}

out:
	sbuf_finish(s);
	return (sbuf_data(s));
}

static int
ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
{
	const struct i40e_aqc_switch_config_element_resp *one, *two;
	one = (const struct i40e_aqc_switch_config_element_resp *)a;
	two = (const struct i40e_aqc_switch_config_element_resp *)b;

	return ((int)one->seid - (int)two->seid);
}

static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;
	enum i40e_status_code status;
	int error = 0;
	u16 next = 0;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	struct i40e_aqc_switch_config_element_resp *elem;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	status = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (status) {
		device_printf(dev,
		    "%s: aq_get_switch_config() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		return (EIO);
	}
	if (next)
		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
		    __func__, next);

	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}

	/* Sort entries by SEID for display */
	qsort(sw_config->element, sw_config->header.num_reported,
	    sizeof(struct i40e_aqc_switch_config_element_resp),
	    &ixl_sw_cfg_elem_seid_cmp);

	sbuf_cat(buf, "\n");
	/* Assuming <= 255 elements in switch */
	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
	/* Exclude:
	 * Revision -- all elements are revision 1 for now
	 */
	sbuf_printf(buf,
	    "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
	    "                |                 |                 | (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		elem = &sw_config->element[i];
%#8x", 3925 sbuf_printf(buf, "%4d", elem->seid); 3926 sbuf_cat(buf, " "); 3927 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3928 elem->element_type, elem->seid)); 3929 sbuf_cat(buf, " | "); 3930 sbuf_printf(buf, "%4d", elem->uplink_seid); 3931 sbuf_cat(buf, " "); 3932 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3933 0, elem->uplink_seid)); 3934 sbuf_cat(buf, " | "); 3935 sbuf_printf(buf, "%4d", elem->downlink_seid); 3936 sbuf_cat(buf, " "); 3937 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3938 0, elem->downlink_seid)); 3939 sbuf_cat(buf, " | "); 3940 sbuf_printf(buf, "%8d", elem->connection_type); 3941 if (i < sw_config->header.num_reported - 1) 3942 sbuf_cat(buf, "\n"); 3943 } 3944 sbuf_delete(nmbuf); 3945 3946 error = sbuf_finish(buf); 3947 if (error) 3948 device_printf(dev, "Error finishing sbuf: %d\n", error); 3949 3950 sbuf_delete(buf); 3951 3952 return (error); 3953 } 3954 3955 static int 3956 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS) 3957 { 3958 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3959 struct i40e_hw *hw = &pf->hw; 3960 device_t dev = pf->dev; 3961 int requested_vlan = -1; 3962 enum i40e_status_code status = 0; 3963 int error = 0; 3964 3965 error = sysctl_handle_int(oidp, &requested_vlan, 0, req); 3966 if ((error) || (req->newptr == NULL)) 3967 return (error); 3968 3969 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) { 3970 device_printf(dev, "Flags disallow setting of vlans\n"); 3971 return (ENODEV); 3972 } 3973 3974 hw->switch_tag = requested_vlan; 3975 device_printf(dev, 3976 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n", 3977 hw->switch_tag, hw->first_tag, hw->second_tag); 3978 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL); 3979 if (status) { 3980 device_printf(dev, 3981 "%s: aq_set_switch_config() error %s, aq error %s\n", 3982 __func__, i40e_stat_str(hw, status), 3983 i40e_aq_str(hw, hw->aq.asq_last_status)); 3984 return (status); 3985 } 3986 return (0); 3987 } 3988 3989 static int 3990 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) 3991 { 3992 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3993 struct i40e_hw *hw = &pf->hw; 3994 device_t dev = pf->dev; 3995 struct sbuf *buf; 3996 int error = 0; 3997 enum i40e_status_code status; 3998 u32 reg; 3999 4000 struct i40e_aqc_get_set_rss_key_data key_data; 4001 4002 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4003 if (!buf) { 4004 device_printf(dev, "Could not allocate sbuf for output.\n"); 4005 return (ENOMEM); 4006 } 4007 4008 bzero(&key_data, sizeof(key_data)); 4009 4010 sbuf_cat(buf, "\n"); 4011 if (hw->mac.type == I40E_MAC_X722) { 4012 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); 4013 if (status) 4014 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", 4015 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 4016 } else { 4017 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 4018 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 4019 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); 4020 } 4021 } 4022 4023 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); 4024 4025 error = sbuf_finish(buf); 4026 if (error) 4027 device_printf(dev, "Error finishing sbuf: %d\n", error); 4028 sbuf_delete(buf); 4029 4030 return (error); 4031 } 4032 4033 static void 4034 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) 4035 { 4036 int i, j, k, width; 4037 char c; 4038 4039 if (length < 1 || buf == NULL) return; 4040 4041 int byte_stride = 16; 4042 int lines = length 
static void
ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
{
	int i, j, k, width;
	char c;

	if (length < 1 || buf == NULL)
		return;

	int byte_stride = 16;
	int lines = length / byte_stride;
	int rem = length % byte_stride;
	if (rem > 0)
		lines++;

	for (i = 0; i < lines; i++) {
		width = (rem > 0 && i == lines - 1)
		    ? rem : byte_stride;

		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);

		for (j = 0; j < width; j++)
			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);

		if (width < byte_stride) {
			for (k = 0; k < (byte_stride - width); k++)
				sbuf_printf(sb, "   ");
		}

		if (!text) {
			sbuf_printf(sb, "\n");
			continue;
		}

		for (j = 0; j < width; j++) {
			c = (char)buf[i * byte_stride + j];
			if (c < 32 || c > 126)
				sbuf_printf(sb, ".");
			else
				sbuf_printf(sb, "%c", c);

			if (j == width - 1)
				sbuf_printf(sb, "\n");
		}
	}
}

static int
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;
	u8 hlut[512];
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(hlut, sizeof(hlut));
	sbuf_cat(buf, "\n");
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
		if (status)
			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
			reg = rd32(hw, I40E_PFQF_HLUT(i));
			bcopy(&reg, &hlut[i << 2], 4);
		}
	}
	ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}

static int
ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	u64 hena;

	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);

	return (sysctl_handle_long(oidp, NULL, hena, req));
}

/*
 * Sysctl to disable firmware's link management
 *
 * 1 - Disable link management on this port
 * 0 - Re-enable link management
 *
 * On normal NVMs, firmware manages link by default.
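 *
 * Example of toggling it from userland (the sysctl node name here is an
 * assumption; check the debug sysctl tree registered by this driver for
 * the exact OID):
 *
 *   sysctl dev.ixl.0.debug.disable_fw_link_management=1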
4140 */ 4141 static int 4142 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) 4143 { 4144 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4145 struct i40e_hw *hw = &pf->hw; 4146 device_t dev = pf->dev; 4147 int requested_mode = -1; 4148 enum i40e_status_code status = 0; 4149 int error = 0; 4150 4151 /* Read in new mode */ 4152 error = sysctl_handle_int(oidp, &requested_mode, 0, req); 4153 if ((error) || (req->newptr == NULL)) 4154 return (error); 4155 /* Check for sane value */ 4156 if (requested_mode < 0 || requested_mode > 1) { 4157 device_printf(dev, "Valid modes are 0 or 1\n"); 4158 return (EINVAL); 4159 } 4160 4161 /* Set new mode */ 4162 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); 4163 if (status) { 4164 device_printf(dev, 4165 "%s: Error setting new phy debug mode %s," 4166 " aq error: %s\n", __func__, i40e_stat_str(hw, status), 4167 i40e_aq_str(hw, hw->aq.asq_last_status)); 4168 return (EIO); 4169 } 4170 4171 return (0); 4172 } 4173 4174 /* 4175 * Read some diagnostic data from a (Q)SFP+ module 4176 * 4177 * SFP A2 QSFP Lower Page 4178 * Temperature 96-97 22-23 4179 * Vcc 98-99 26-27 4180 * TX power 102-103 34-35..40-41 4181 * RX power 104-105 50-51..56-57 4182 */ 4183 static int 4184 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 4185 { 4186 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4187 device_t dev = pf->dev; 4188 struct sbuf *sbuf; 4189 int error = 0; 4190 u8 output; 4191 4192 if (req->oldptr == NULL) { 4193 error = SYSCTL_OUT(req, 0, 128); 4194 return (0); 4195 } 4196 4197 error = pf->read_i2c_byte(pf, 0, 0xA0, &output); 4198 if (error) { 4199 device_printf(dev, "Error reading from i2c\n"); 4200 return (error); 4201 } 4202 4203 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 4204 if (output == 0x3) { 4205 /* 4206 * Check for: 4207 * - Internally calibrated data 4208 * - Diagnostic monitoring is implemented 4209 */ 4210 pf->read_i2c_byte(pf, 92, 0xA0, &output); 4211 if (!(output & 0x60)) { 4212 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); 4213 return (0); 4214 } 4215 4216 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4217 4218 for (u8 offset = 96; offset < 100; offset++) { 4219 pf->read_i2c_byte(pf, offset, 0xA2, &output); 4220 sbuf_printf(sbuf, "%02X ", output); 4221 } 4222 for (u8 offset = 102; offset < 106; offset++) { 4223 pf->read_i2c_byte(pf, offset, 0xA2, &output); 4224 sbuf_printf(sbuf, "%02X ", output); 4225 } 4226 } else if (output == 0xD || output == 0x11) { 4227 /* 4228 * QSFP+ modules are always internally calibrated, and must indicate 4229 * what types of diagnostic monitoring are implemented 4230 */ 4231 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4232 4233 for (u8 offset = 22; offset < 24; offset++) { 4234 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4235 sbuf_printf(sbuf, "%02X ", output); 4236 } 4237 for (u8 offset = 26; offset < 28; offset++) { 4238 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4239 sbuf_printf(sbuf, "%02X ", output); 4240 } 4241 /* Read the data from the first lane */ 4242 for (u8 offset = 34; offset < 36; offset++) { 4243 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4244 sbuf_printf(sbuf, "%02X ", output); 4245 } 4246 for (u8 offset = 50; offset < 52; offset++) { 4247 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4248 sbuf_printf(sbuf, "%02X ", output); 4249 } 4250 } else { 4251 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); 4252 return (0); 4253 } 4254 4255 sbuf_finish(sbuf); 4256 sbuf_delete(sbuf); 4257 4258 return (0); 4259 } 4260 4261 /* 4262 * 
Sysctl to read a byte from I2C bus. 4263 * 4264 * Input: 32-bit value: 4265 * bits 0-7: device address (0xA0 or 0xA2) 4266 * bits 8-15: offset (0-255) 4267 * bits 16-31: unused 4268 * Output: 8-bit value read 4269 */ 4270 static int 4271 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) 4272 { 4273 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4274 device_t dev = pf->dev; 4275 int input = -1, error = 0; 4276 u8 dev_addr, offset, output; 4277 4278 /* Read in I2C read parameters */ 4279 error = sysctl_handle_int(oidp, &input, 0, req); 4280 if ((error) || (req->newptr == NULL)) 4281 return (error); 4282 /* Validate device address */ 4283 dev_addr = input & 0xFF; 4284 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4285 return (EINVAL); 4286 } 4287 offset = (input >> 8) & 0xFF; 4288 4289 error = pf->read_i2c_byte(pf, offset, dev_addr, &output); 4290 if (error) 4291 return (error); 4292 4293 device_printf(dev, "%02X\n", output); 4294 return (0); 4295 } 4296 4297 /* 4298 * Sysctl to write a byte to the I2C bus. 4299 * 4300 * Input: 32-bit value: 4301 * bits 0-7: device address (0xA0 or 0xA2) 4302 * bits 8-15: offset (0-255) 4303 * bits 16-23: value to write 4304 * bits 24-31: unused 4305 * Output: 8-bit value written 4306 */ 4307 static int 4308 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) 4309 { 4310 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4311 device_t dev = pf->dev; 4312 int input = -1, error = 0; 4313 u8 dev_addr, offset, value; 4314 4315 /* Read in I2C write parameters */ 4316 error = sysctl_handle_int(oidp, &input, 0, req); 4317 if ((error) || (req->newptr == NULL)) 4318 return (error); 4319 /* Validate device address */ 4320 dev_addr = input & 0xFF; 4321 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4322 return (EINVAL); 4323 } 4324 offset = (input >> 8) & 0xFF; 4325 value = (input >> 16) & 0xFF; 4326 4327 error = pf->write_i2c_byte(pf, offset, dev_addr, value); 4328 if (error) 4329 return (error); 4330 4331 device_printf(dev, "%02X written\n", value); 4332 return (0); 4333 } 4334 4335 static int 4336 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4337 u8 bit_pos, int *is_set) 4338 { 4339 device_t dev = pf->dev; 4340 struct i40e_hw *hw = &pf->hw; 4341 enum i40e_status_code status; 4342 4343 if (IXL_PF_IN_RECOVERY_MODE(pf)) 4344 return (EIO); 4345 4346 status = i40e_aq_get_phy_capabilities(hw, 4347 FALSE, FALSE, abilities, NULL); 4348 if (status) { 4349 device_printf(dev, 4350 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 4351 __func__, i40e_stat_str(hw, status), 4352 i40e_aq_str(hw, hw->aq.asq_last_status)); 4353 return (EIO); 4354 } 4355 4356 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); 4357 return (0); 4358 } 4359 4360 static int 4361 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4362 u8 bit_pos, int set) 4363 { 4364 device_t dev = pf->dev; 4365 struct i40e_hw *hw = &pf->hw; 4366 struct i40e_aq_set_phy_config config; 4367 enum i40e_status_code status; 4368 4369 /* Set new PHY config */ 4370 memset(&config, 0, sizeof(config)); 4371 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); 4372 if (set) 4373 config.fec_config |= bit_pos; 4374 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { 4375 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 4376 config.phy_type = abilities->phy_type; 4377 config.phy_type_ext = abilities->phy_type_ext; 4378 config.link_speed = abilities->link_speed; 4379 config.eee_capability = abilities->eee_capability; 4380 
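		/*
		 * The PHY type, speed, and EEE settings are copied through
		 * from the current abilities so that the AQ call below
		 * changes only the FEC configuration.
		 */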
config.eeer = abilities->eeer_val; 4381 config.low_power_ctrl = abilities->d3_lpan; 4382 status = i40e_aq_set_phy_config(hw, &config, NULL); 4383 4384 if (status) { 4385 device_printf(dev, 4386 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", 4387 __func__, i40e_stat_str(hw, status), 4388 i40e_aq_str(hw, hw->aq.asq_last_status)); 4389 return (EIO); 4390 } 4391 } 4392 4393 return (0); 4394 } 4395 4396 static int 4397 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) 4398 { 4399 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4400 int mode, error = 0; 4401 4402 struct i40e_aq_get_phy_abilities_resp abilities; 4403 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); 4404 if (error) 4405 return (error); 4406 /* Read in new mode */ 4407 error = sysctl_handle_int(oidp, &mode, 0, req); 4408 if ((error) || (req->newptr == NULL)) 4409 return (error); 4410 4411 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); 4412 } 4413 4414 static int 4415 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) 4416 { 4417 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4418 int mode, error = 0; 4419 4420 struct i40e_aq_get_phy_abilities_resp abilities; 4421 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); 4422 if (error) 4423 return (error); 4424 /* Read in new mode */ 4425 error = sysctl_handle_int(oidp, &mode, 0, req); 4426 if ((error) || (req->newptr == NULL)) 4427 return (error); 4428 4429 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); 4430 } 4431 4432 static int 4433 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) 4434 { 4435 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4436 int mode, error = 0; 4437 4438 struct i40e_aq_get_phy_abilities_resp abilities; 4439 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); 4440 if (error) 4441 return (error); 4442 /* Read in new mode */ 4443 error = sysctl_handle_int(oidp, &mode, 0, req); 4444 if ((error) || (req->newptr == NULL)) 4445 return (error); 4446 4447 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); 4448 } 4449 4450 static int 4451 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) 4452 { 4453 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4454 int mode, error = 0; 4455 4456 struct i40e_aq_get_phy_abilities_resp abilities; 4457 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); 4458 if (error) 4459 return (error); 4460 /* Read in new mode */ 4461 error = sysctl_handle_int(oidp, &mode, 0, req); 4462 if ((error) || (req->newptr == NULL)) 4463 return (error); 4464 4465 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); 4466 } 4467 4468 static int 4469 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) 4470 { 4471 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4472 int mode, error = 0; 4473 4474 struct i40e_aq_get_phy_abilities_resp abilities; 4475 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); 4476 if (error) 4477 return (error); 4478 /* Read in new mode */ 4479 error = sysctl_handle_int(oidp, &mode, 0, req); 4480 if ((error) || (req->newptr == NULL)) 4481 return (error); 4482 4483 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); 4484 } 4485 4486 static int 4487 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) 4488 { 4489 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4490 struct i40e_hw *hw = &pf->hw; 4491 device_t dev = pf->dev; 4492 struct sbuf *buf; 4493 int error = 0; 4494 enum i40e_status_code status; 4495 4496 buf = 
sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	u8 *final_buff;
	/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
	if (final_buff == NULL) {
		device_printf(dev, "Could not allocate memory for output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}
	int final_buff_len = 0;

	u8 cluster_id = 1;
	bool more = true;

	u8 dump_buf[4096];
	u16 curr_buff_size = 4096;
	u8 curr_next_table = 0;
	u32 curr_next_index = 0;

	u16 ret_buff_size;
	u8 ret_next_table;
	u32 ret_next_index;

	sbuf_cat(buf, "\n");

	while (more) {
		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
		if (status) {
			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			goto free_out;
		}

		/* copy info out of temp buffer */
		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
		final_buff_len += ret_buff_size;

		if (ret_next_table != curr_next_table) {
			/* We're done with the current table; we can dump out read data. */
			sbuf_printf(buf, "%d:", curr_next_table);
			int bytes_printed = 0;
			while (bytes_printed < final_buff_len) {
				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
				bytes_printed += 16;
			}
			sbuf_cat(buf, "\n");

			/* The entire cluster has been read; we're finished */
			if (ret_next_table == 0xFF)
				break;

			/* Otherwise clear the output buffer and continue reading */
			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
			final_buff_len = 0;
		}

		if (ret_next_index == 0xFFFFFFFF)
			ret_next_index = 0;

		bzero(dump_buf, sizeof(dump_buf));
		curr_next_table = ret_next_table;
		curr_next_index = ret_next_index;
	}

free_out:
	free(final_buff, M_IXL);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}

static int
ixl_start_fw_lldp(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_aq_start_lldp(hw, false, NULL);
	if (status != I40E_SUCCESS) {
		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EEXIST:
			device_printf(pf->dev,
			    "FW LLDP agent is already running\n");
			break;
		case I40E_AQ_RC_EPERM:
			device_printf(pf->dev,
			    "Device configuration forbids SW from starting "
			    "the LLDP agent. 
Set the \"LLDP Agent\" UEFI HII " 4594 "attribute to \"Enabled\" to use this sysctl\n"); 4595 return (EINVAL); 4596 default: 4597 device_printf(pf->dev, 4598 "Starting FW LLDP agent failed: error: %s, %s\n", 4599 i40e_stat_str(hw, status), 4600 i40e_aq_str(hw, hw->aq.asq_last_status)); 4601 return (EINVAL); 4602 } 4603 } 4604 4605 ixl_clear_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED); 4606 return (0); 4607 } 4608 4609 static int 4610 ixl_stop_fw_lldp(struct ixl_pf *pf) 4611 { 4612 struct i40e_hw *hw = &pf->hw; 4613 device_t dev = pf->dev; 4614 enum i40e_status_code status; 4615 4616 if (hw->func_caps.npar_enable != 0) { 4617 device_printf(dev, 4618 "Disabling FW LLDP agent is not supported on this device\n"); 4619 return (EINVAL); 4620 } 4621 4622 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 4623 device_printf(dev, 4624 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); 4625 return (EINVAL); 4626 } 4627 4628 status = i40e_aq_stop_lldp(hw, true, false, NULL); 4629 if (status != I40E_SUCCESS) { 4630 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 4631 device_printf(dev, 4632 "Disabling FW LLDP agent failed: error: %s, %s\n", 4633 i40e_stat_str(hw, status), 4634 i40e_aq_str(hw, hw->aq.asq_last_status)); 4635 return (EINVAL); 4636 } 4637 4638 device_printf(dev, "FW LLDP agent is already stopped\n"); 4639 } 4640 4641 i40e_aq_set_dcb_parameters(hw, true, NULL); 4642 ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED); 4643 return (0); 4644 } 4645 4646 static int 4647 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 4648 { 4649 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4650 int state, new_state, error = 0; 4651 4652 state = new_state = !ixl_test_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED); 4653 4654 /* Read in new mode */ 4655 error = sysctl_handle_int(oidp, &new_state, 0, req); 4656 if ((error) || (req->newptr == NULL)) 4657 return (error); 4658 4659 /* Already in requested state */ 4660 if (new_state == state) 4661 return (error); 4662 4663 if (new_state == 0) 4664 return ixl_stop_fw_lldp(pf); 4665 4666 return ixl_start_fw_lldp(pf); 4667 } 4668 4669 static int 4670 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4671 { 4672 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4673 int state, new_state; 4674 int sysctl_handle_status = 0; 4675 enum i40e_status_code cmd_status; 4676 4677 /* Init states' values */ 4678 state = new_state = ixl_test_state(&pf->state, IXL_STATE_EEE_ENABLED); 4679 4680 /* Get requested mode */ 4681 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); 4682 if ((sysctl_handle_status) || (req->newptr == NULL)) 4683 return (sysctl_handle_status); 4684 4685 /* Check if state has changed */ 4686 if (new_state == state) 4687 return (0); 4688 4689 /* Set new state */ 4690 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); 4691 4692 /* Save new state or report error */ 4693 if (!cmd_status) { 4694 if (new_state == 0) 4695 ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED); 4696 else 4697 ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED); 4698 } else if (cmd_status == I40E_ERR_CONFIG) 4699 return (EPERM); 4700 else 4701 return (EIO); 4702 4703 return (0); 4704 } 4705 4706 static int 4707 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS) 4708 { 4709 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4710 int error, state; 4711 4712 state = ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 4713 4714 error = sysctl_handle_int(oidp, &state, 0, req); 4715 if ((error) || (req->newptr == NULL)) 4716 return 
(error); 4717 4718 if (state == 0) 4719 ixl_clear_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 4720 else 4721 ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN); 4722 4723 return (0); 4724 } 4725 4726 4727 int 4728 ixl_attach_get_link_status(struct ixl_pf *pf) 4729 { 4730 struct i40e_hw *hw = &pf->hw; 4731 device_t dev = pf->dev; 4732 enum i40e_status_code status; 4733 4734 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 4735 (hw->aq.fw_maj_ver < 4)) { 4736 i40e_msec_delay(75); 4737 status = i40e_aq_set_link_restart_an(hw, TRUE, NULL); 4738 if (status != I40E_SUCCESS) { 4739 device_printf(dev, 4740 "%s link restart failed status: %s, aq_err=%s\n", 4741 __func__, i40e_stat_str(hw, status), 4742 i40e_aq_str(hw, hw->aq.asq_last_status)); 4743 return (EINVAL); 4744 } 4745 } 4746 4747 /* Determine link state */ 4748 hw->phy.get_link_info = TRUE; 4749 status = i40e_get_link_status(hw, &pf->link_up); 4750 if (status != I40E_SUCCESS) { 4751 device_printf(dev, 4752 "%s get link status, status: %s aq_err=%s\n", 4753 __func__, i40e_stat_str(hw, status), 4754 i40e_aq_str(hw, hw->aq.asq_last_status)); 4755 /* 4756 * Most probably FW has not finished configuring PHY. 4757 * Retry periodically in a timer callback. 4758 */ 4759 ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING); 4760 pf->link_poll_start = getsbinuptime(); 4761 return (EAGAIN); 4762 } 4763 ixl_dbg_link(pf, "%s link_up: %d\n", __func__, pf->link_up); 4764 4765 /* Flow Control mode not set by user, read current FW settings */ 4766 if (pf->fc == -1) 4767 pf->fc = hw->fc.current_mode; 4768 4769 return (0); 4770 } 4771 4772 static int 4773 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS) 4774 { 4775 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4776 int requested = 0, error = 0; 4777 4778 /* Read in new mode */ 4779 error = sysctl_handle_int(oidp, &requested, 0, req); 4780 if ((error) || (req->newptr == NULL)) 4781 return (error); 4782 4783 /* Initiate the PF reset later in the admin task */ 4784 ixl_set_state(&pf->state, IXL_STATE_PF_RESET_REQ); 4785 4786 return (error); 4787 } 4788 4789 static int 4790 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS) 4791 { 4792 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4793 struct i40e_hw *hw = &pf->hw; 4794 int requested = 0, error = 0; 4795 4796 /* Read in new mode */ 4797 error = sysctl_handle_int(oidp, &requested, 0, req); 4798 if ((error) || (req->newptr == NULL)) 4799 return (error); 4800 4801 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 4802 4803 return (error); 4804 } 4805 4806 static int 4807 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS) 4808 { 4809 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4810 struct i40e_hw *hw = &pf->hw; 4811 int requested = 0, error = 0; 4812 4813 /* Read in new mode */ 4814 error = sysctl_handle_int(oidp, &requested, 0, req); 4815 if ((error) || (req->newptr == NULL)) 4816 return (error); 4817 4818 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK); 4819 4820 return (error); 4821 } 4822 4823 /* 4824 * Print out mapping of TX queue indexes and Rx queue indexes 4825 * to MSI-X vectors. 
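 *
 * The output has one line per queue, in the shape below (register
 * values shown as zeros are placeholders, not real readings):
 *
 *   (rxq   0): 1 LNKLSTN: 00000000 QINT_RQCTL: 00000000
 *   (txq   0): 1 QINT_TQCTL: 00000000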
4826 */ 4827 static int 4828 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS) 4829 { 4830 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4831 struct ixl_vsi *vsi = &pf->vsi; 4832 struct i40e_hw *hw = vsi->hw; 4833 device_t dev = pf->dev; 4834 struct sbuf *buf; 4835 int error = 0; 4836 4837 struct ixl_rx_queue *rx_que = vsi->rx_queues; 4838 struct ixl_tx_queue *tx_que = vsi->tx_queues; 4839 4840 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4841 if (!buf) { 4842 device_printf(dev, "Could not allocate sbuf for output.\n"); 4843 return (ENOMEM); 4844 } 4845 4846 sbuf_cat(buf, "\n"); 4847 for (int i = 0; i < vsi->num_rx_queues; i++) { 4848 rx_que = &vsi->rx_queues[i]; 4849 sbuf_printf(buf, 4850 "(rxq %3d): %d LNKLSTN: %08x QINT_RQCTL: %08x\n", 4851 i, rx_que->msix, 4852 rd32(hw, I40E_PFINT_LNKLSTN(rx_que->msix - 1)), 4853 rd32(hw, I40E_QINT_RQCTL(rx_que->msix - 1))); 4854 } 4855 for (int i = 0; i < vsi->num_tx_queues; i++) { 4856 tx_que = &vsi->tx_queues[i]; 4857 sbuf_printf(buf, "(txq %3d): %d QINT_TQCTL: %08x\n", 4858 i, tx_que->msix, 4859 rd32(hw, I40E_QINT_TQCTL(tx_que->msix - 1))); 4860 } 4861 4862 error = sbuf_finish(buf); 4863 if (error) 4864 device_printf(dev, "Error finishing sbuf: %d\n", error); 4865 sbuf_delete(buf); 4866 4867 return (error); 4868 } 4869 4870 static int 4871 ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS) 4872 { 4873 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4874 struct ixl_vsi *vsi = &pf->vsi; 4875 struct i40e_hw *hw = vsi->hw; 4876 device_t dev = pf->dev; 4877 struct sbuf *buf; 4878 int error = 0; 4879 4880 struct ixl_rx_queue *rx_que = vsi->rx_queues; 4881 4882 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4883 if (!buf) { 4884 device_printf(dev, "Could not allocate sbuf for output.\n"); 4885 return (ENOMEM); 4886 } 4887 4888 sbuf_cat(buf, "\n"); 4889 for (int i = 0; i < vsi->num_rx_queues; i++) { 4890 rx_que = &vsi->rx_queues[i]; 4891 sbuf_printf(buf, 4892 "(rxq %3d): %d PFINT_DYN_CTLN: %08x\n", 4893 i, rx_que->msix, 4894 rd32(hw, I40E_PFINT_DYN_CTLN(rx_que->msix - 1))); 4895 } 4896 4897 error = sbuf_finish(buf); 4898 if (error) 4899 device_printf(dev, "Error finishing sbuf: %d\n", error); 4900 sbuf_delete(buf); 4901 4902 return (error); 4903 } 4904
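/*
 * A usage sketch for the debug reset sysctls above.  The node names are
 * assumptions derived from the handler names; verify them against the
 * OIDs this driver actually registers:
 *
 *   sysctl dev.ixl.0.debug.do_pf_reset=1      # queue a PF reset for the admin task
 *   sysctl dev.ixl.0.debug.do_core_reset=1    # write CORER to I40E_GLGEN_RTRIG
 *   sysctl dev.ixl.0.debug.do_global_reset=1  # write GLOBR to I40E_GLGEN_RTRIG
 */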