1 /****************************************************************************** 2 3 Copyright (c) 2013-2018, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 36 #include "ixl_pf.h" 37 38 #ifdef PCI_IOV 39 #include "ixl_pf_iov.h" 40 #endif 41 42 #ifdef IXL_IW 43 #include "ixl_iw.h" 44 #include "ixl_iw_int.h" 45 #endif 46 47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool); 48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool); 49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed); 50 static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int); 51 static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int); 52 static char * ixl_switch_element_string(struct sbuf *, u8, u16); 53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *); 54 55 /* Sysctls */ 56 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS); 57 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS); 58 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS); 59 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); 60 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); 61 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); 62 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); 63 64 static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS); 65 static int ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS); 66 67 /* Debug Sysctls */ 68 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); 69 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); 70 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); 71 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); 72 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); 73 static int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS); 74 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); 75 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); 76 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); 77 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); 78 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); 79 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); 80 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); 81 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); 82 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); 83 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); 84 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); 85 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS); 86 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS); 87 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); 88 89 /* Debug Sysctls */ 90 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS); 91 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS); 92 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS); 93 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS); 94 #ifdef IXL_DEBUG 95 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); 96 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); 97 #endif 98 99 #ifdef IXL_IW 100 extern int ixl_enable_iwarp; 101 extern int ixl_limit_iwarp_msix; 102 #endif 103 104 static const char * const ixl_fc_string[6] = { 105 "None", 106 "Rx", 107 "Tx", 108 "Full", 109 "Priority", 110 "Default" 111 }; 112 113 static char *ixl_fec_string[3] = { 114 "CL108 RS-FEC", 115 "CL74 FC-FEC/BASE-R", 116 "None" 117 }; 118 119 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); 120 121 /* 122 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string 123 */ 124 void 125 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf) 126 { 127 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24); 128 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF); 129 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF); 130 131 sbuf_printf(buf, 132 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d", 133 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, 134 hw->aq.api_maj_ver, hw->aq.api_min_ver, 135 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> 136 IXL_NVM_VERSION_HI_SHIFT, 137 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> 138 IXL_NVM_VERSION_LO_SHIFT, 139 hw->nvm.eetrack, 140 oem_ver, oem_build, oem_patch); 141 } 142 143 void 144 ixl_print_nvm_version(struct ixl_pf *pf) 145 { 146 struct i40e_hw *hw = &pf->hw; 147 device_t dev = pf->dev; 148 struct sbuf *sbuf; 149 150 sbuf = sbuf_new_auto(); 151 ixl_nvm_version_str(hw, sbuf); 152 sbuf_finish(sbuf); 153 device_printf(dev, "%s\n", sbuf_data(sbuf)); 154 sbuf_delete(sbuf); 155 } 156 157 /** 158 * ixl_get_fw_mode - Check the state of FW 159 * @hw: device hardware structure 160 * 161 * Identify state of FW. It might be in a recovery mode 162 * which limits functionality and requires special handling 163 * from the driver. 164 * 165 * @returns FW mode (normal, recovery, unexpected EMP reset) 166 */ 167 static enum ixl_fw_mode 168 ixl_get_fw_mode(struct ixl_pf *pf) 169 { 170 struct i40e_hw *hw = &pf->hw; 171 enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL; 172 u32 fwsts; 173 174 #ifdef IXL_DEBUG 175 if (pf->recovery_mode) 176 return IXL_FW_MODE_RECOVERY; 177 #endif 178 fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; 179 180 /* Is set and has one of expected values */ 181 if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK && 182 fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) || 183 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || 184 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK) 185 fw_mode = IXL_FW_MODE_RECOVERY; 186 else { 187 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 && 188 fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10) 189 fw_mode = IXL_FW_MODE_UEMPR; 190 } 191 return (fw_mode); 192 } 193 194 /** 195 * ixl_pf_reset - Reset the PF 196 * @pf: PF structure 197 * 198 * Ensure that FW is in the right state and do the reset 199 * if needed. 200 * 201 * @returns zero on success, or an error code on failure. 202 */ 203 int 204 ixl_pf_reset(struct ixl_pf *pf) 205 { 206 struct i40e_hw *hw = &pf->hw; 207 enum i40e_status_code status; 208 enum ixl_fw_mode fw_mode; 209 210 fw_mode = ixl_get_fw_mode(pf); 211 ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode); 212 if (fw_mode == IXL_FW_MODE_RECOVERY) { 213 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); 214 /* Don't try to reset device if it's in recovery mode */ 215 return (0); 216 } 217 218 status = i40e_pf_reset(hw); 219 if (status == I40E_SUCCESS) 220 return (0); 221 222 /* Check FW mode again in case it has changed while 223 * waiting for reset to complete */ 224 fw_mode = ixl_get_fw_mode(pf); 225 ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode); 226 if (fw_mode == IXL_FW_MODE_RECOVERY) { 227 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); 228 return (0); 229 } 230 231 if (fw_mode == IXL_FW_MODE_UEMPR) 232 device_printf(pf->dev, 233 "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); 234 else 235 device_printf(pf->dev, "PF reset failure %s\n", 236 i40e_stat_str(hw, status)); 237 return (EIO); 238 } 239 240 /** 241 * ixl_setup_hmc - Setup LAN Host Memory Cache 242 * @pf: PF structure 243 * 244 * Init and configure LAN Host Memory Cache 245 * 246 * @returns 0 on success, EIO on error 247 */ 248 int 249 ixl_setup_hmc(struct ixl_pf *pf) 250 { 251 struct i40e_hw *hw = &pf->hw; 252 enum i40e_status_code status; 253 254 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 255 hw->func_caps.num_rx_qp, 0, 0); 256 if (status) { 257 device_printf(pf->dev, "init_lan_hmc failed: %s\n", 258 i40e_stat_str(hw, status)); 259 return (EIO); 260 } 261 262 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 263 if (status) { 264 device_printf(pf->dev, "configure_lan_hmc failed: %s\n", 265 i40e_stat_str(hw, status)); 266 return (EIO); 267 } 268 269 return (0); 270 } 271 272 /** 273 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache 274 * @pf: PF structure 275 * 276 * Shutdown Host Memory Cache if configured. 277 * 278 */ 279 void 280 ixl_shutdown_hmc(struct ixl_pf *pf) 281 { 282 struct i40e_hw *hw = &pf->hw; 283 enum i40e_status_code status; 284 285 /* HMC not configured, no need to shutdown */ 286 if (hw->hmc.hmc_obj == NULL) 287 return; 288 289 status = i40e_shutdown_lan_hmc(hw); 290 if (status) 291 device_printf(pf->dev, 292 "Shutdown LAN HMC failed with code %s\n", 293 i40e_stat_str(hw, status)); 294 } 295 /* 296 * Write PF ITR values to queue ITR registers. 297 */ 298 void 299 ixl_configure_itr(struct ixl_pf *pf) 300 { 301 ixl_configure_tx_itr(pf); 302 ixl_configure_rx_itr(pf); 303 } 304 305 /********************************************************************* 306 * 307 * Get the hardware capabilities 308 * 309 **********************************************************************/ 310 311 int 312 ixl_get_hw_capabilities(struct ixl_pf *pf) 313 { 314 struct i40e_aqc_list_capabilities_element_resp *buf; 315 struct i40e_hw *hw = &pf->hw; 316 device_t dev = pf->dev; 317 enum i40e_status_code status; 318 int len, i2c_intfc_num; 319 bool again = TRUE; 320 u16 needed; 321 322 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 323 hw->func_caps.iwarp = 0; 324 return (0); 325 } 326 327 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 328 retry: 329 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) 330 malloc(len, M_IXL, M_NOWAIT | M_ZERO))) { 331 device_printf(dev, "Unable to allocate cap memory\n"); 332 return (ENOMEM); 333 } 334 335 /* This populates the hw struct */ 336 status = i40e_aq_discover_capabilities(hw, buf, len, 337 &needed, i40e_aqc_opc_list_func_capabilities, NULL); 338 free(buf, M_IXL); 339 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && 340 (again == TRUE)) { 341 /* retry once with a larger buffer */ 342 again = FALSE; 343 len = needed; 344 goto retry; 345 } else if (status != I40E_SUCCESS) { 346 device_printf(dev, "capability discovery failed; status %s, error %s\n", 347 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 348 return (ENODEV); 349 } 350 351 /* 352 * Some devices have both MDIO and I2C; since this isn't reported 353 * by the FW, check registers to see if an I2C interface exists. 354 */ 355 i2c_intfc_num = ixl_find_i2c_interface(pf); 356 if (i2c_intfc_num != -1) 357 pf->has_i2c = true; 358 359 /* Determine functions to use for driver I2C accesses */ 360 switch (pf->i2c_access_method) { 361 case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: { 362 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 363 pf->read_i2c_byte = ixl_read_i2c_byte_aq; 364 pf->write_i2c_byte = ixl_write_i2c_byte_aq; 365 } else { 366 pf->read_i2c_byte = ixl_read_i2c_byte_reg; 367 pf->write_i2c_byte = ixl_write_i2c_byte_reg; 368 } 369 break; 370 } 371 case IXL_I2C_ACCESS_METHOD_AQ: 372 pf->read_i2c_byte = ixl_read_i2c_byte_aq; 373 pf->write_i2c_byte = ixl_write_i2c_byte_aq; 374 break; 375 case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD: 376 pf->read_i2c_byte = ixl_read_i2c_byte_reg; 377 pf->write_i2c_byte = ixl_write_i2c_byte_reg; 378 break; 379 case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS: 380 pf->read_i2c_byte = ixl_read_i2c_byte_bb; 381 pf->write_i2c_byte = ixl_write_i2c_byte_bb; 382 break; 383 default: 384 /* Should not happen */ 385 device_printf(dev, "Error setting I2C access functions\n"); 386 break; 387 } 388 389 /* Keep link active by default */ 390 atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN); 391 392 /* Print a subset of the capability information. */ 393 device_printf(dev, 394 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n", 395 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, 396 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, 397 (hw->func_caps.mdio_port_mode == 2) ? "I2C" : 398 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" : 399 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" : 400 "MDIO shared"); 401 402 return (0); 403 } 404 405 /* For the set_advertise sysctl */ 406 void 407 ixl_set_initial_advertised_speeds(struct ixl_pf *pf) 408 { 409 device_t dev = pf->dev; 410 int err; 411 412 /* Make sure to initialize the device to the complete list of 413 * supported speeds on driver load, to ensure unloading and 414 * reloading the driver will restore this value. 415 */ 416 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); 417 if (err) { 418 /* Non-fatal error */ 419 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", 420 __func__, err); 421 return; 422 } 423 424 pf->advertised_speed = 425 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 426 } 427 428 int 429 ixl_teardown_hw_structs(struct ixl_pf *pf) 430 { 431 enum i40e_status_code status = 0; 432 struct i40e_hw *hw = &pf->hw; 433 device_t dev = pf->dev; 434 435 /* Shutdown LAN HMC */ 436 if (hw->hmc.hmc_obj) { 437 status = i40e_shutdown_lan_hmc(hw); 438 if (status) { 439 device_printf(dev, 440 "init: LAN HMC shutdown failure; status %s\n", 441 i40e_stat_str(hw, status)); 442 goto err_out; 443 } 444 } 445 446 /* Shutdown admin queue */ 447 ixl_disable_intr0(hw); 448 status = i40e_shutdown_adminq(hw); 449 if (status) 450 device_printf(dev, 451 "init: Admin Queue shutdown failure; status %s\n", 452 i40e_stat_str(hw, status)); 453 454 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag); 455 err_out: 456 return (status); 457 } 458 459 /* 460 ** Creates new filter with given MAC address and VLAN ID 461 */ 462 static struct ixl_mac_filter * 463 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan) 464 { 465 struct ixl_mac_filter *f; 466 467 /* create a new empty filter */ 468 f = malloc(sizeof(struct ixl_mac_filter), 469 M_IXL, M_NOWAIT | M_ZERO); 470 if (f) { 471 LIST_INSERT_HEAD(headp, f, ftle); 472 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); 473 f->vlan = vlan; 474 } 475 476 return (f); 477 } 478 479 /** 480 * ixl_free_filters - Free all filters in given list 481 * headp - pointer to list head 482 * 483 * Frees memory used by each entry in the list. 484 * Does not remove filters from HW. 485 */ 486 void 487 ixl_free_filters(struct ixl_ftl_head *headp) 488 { 489 struct ixl_mac_filter *f, *nf; 490 491 f = LIST_FIRST(headp); 492 while (f != NULL) { 493 nf = LIST_NEXT(f, ftle); 494 free(f, M_IXL); 495 f = nf; 496 } 497 498 LIST_INIT(headp); 499 } 500 501 static u_int 502 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 503 { 504 struct ixl_add_maddr_arg *ama = arg; 505 struct ixl_vsi *vsi = ama->vsi; 506 const u8 *macaddr = (u8*)LLADDR(sdl); 507 struct ixl_mac_filter *f; 508 509 /* Does one already exist */ 510 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY); 511 if (f != NULL) 512 return (0); 513 514 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY); 515 if (f == NULL) { 516 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 517 return (0); 518 } 519 f->flags |= IXL_FILTER_MC; 520 521 return (1); 522 } 523 524 /********************************************************************* 525 * Filter Routines 526 * 527 * Routines for multicast and vlan filter management. 528 * 529 *********************************************************************/ 530 void 531 ixl_add_multi(struct ixl_vsi *vsi) 532 { 533 if_t ifp = vsi->ifp; 534 struct i40e_hw *hw = vsi->hw; 535 int mcnt = 0; 536 struct ixl_add_maddr_arg cb_arg; 537 538 IOCTL_DEBUGOUT("ixl_add_multi: begin"); 539 540 mcnt = if_llmaddr_count(ifp); 541 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { 542 i40e_aq_set_vsi_multicast_promiscuous(hw, 543 vsi->seid, TRUE, NULL); 544 /* delete all existing MC filters */ 545 ixl_del_multi(vsi, true); 546 return; 547 } 548 549 cb_arg.vsi = vsi; 550 LIST_INIT(&cb_arg.to_add); 551 552 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg); 553 if (mcnt > 0) 554 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt); 555 556 IOCTL_DEBUGOUT("ixl_add_multi: end"); 557 } 558 559 static u_int 560 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 561 { 562 struct ixl_mac_filter *f = arg; 563 564 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl))) 565 return (1); 566 else 567 return (0); 568 } 569 570 void 571 ixl_del_multi(struct ixl_vsi *vsi, bool all) 572 { 573 struct ixl_ftl_head to_del; 574 if_t ifp = vsi->ifp; 575 struct ixl_mac_filter *f, *fn; 576 int mcnt = 0; 577 578 IOCTL_DEBUGOUT("ixl_del_multi: begin"); 579 580 LIST_INIT(&to_del); 581 /* Search for removed multicast addresses */ 582 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) { 583 if ((f->flags & IXL_FILTER_MC) == 0 || 584 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0))) 585 continue; 586 587 LIST_REMOVE(f, ftle); 588 LIST_INSERT_HEAD(&to_del, f, ftle); 589 mcnt++; 590 } 591 592 if (mcnt > 0) 593 ixl_del_hw_filters(vsi, &to_del, mcnt); 594 } 595 596 void 597 ixl_link_up_msg(struct ixl_pf *pf) 598 { 599 struct i40e_hw *hw = &pf->hw; 600 if_t ifp = pf->vsi.ifp; 601 char *req_fec_string, *neg_fec_string; 602 u8 fec_abilities; 603 604 fec_abilities = hw->phy.link_info.req_fec_info; 605 /* If both RS and KR are requested, only show RS */ 606 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) 607 req_fec_string = ixl_fec_string[0]; 608 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) 609 req_fec_string = ixl_fec_string[1]; 610 else 611 req_fec_string = ixl_fec_string[2]; 612 613 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) 614 neg_fec_string = ixl_fec_string[0]; 615 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) 616 neg_fec_string = ixl_fec_string[1]; 617 else 618 neg_fec_string = ixl_fec_string[2]; 619 620 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 621 if_name(ifp), 622 ixl_link_speed_string(hw->phy.link_info.link_speed), 623 req_fec_string, neg_fec_string, 624 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", 625 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && 626 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 627 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? 628 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 629 ixl_fc_string[1] : ixl_fc_string[0]); 630 } 631 632 /* 633 * Configure admin queue/misc interrupt cause registers in hardware. 634 */ 635 void 636 ixl_configure_intr0_msix(struct ixl_pf *pf) 637 { 638 struct i40e_hw *hw = &pf->hw; 639 u32 reg; 640 641 /* First set up the adminq - vector 0 */ 642 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 643 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 644 645 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 646 I40E_PFINT_ICR0_ENA_GRST_MASK | 647 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 648 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 649 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 650 I40E_PFINT_ICR0_ENA_VFLR_MASK | 651 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 652 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; 653 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 654 655 /* 656 * 0x7FF is the end of the queue list. 657 * This means we won't use MSI-X vector 0 for a queue interrupt 658 * in MSI-X mode. 659 */ 660 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); 661 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */ 662 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E); 663 664 wr32(hw, I40E_PFINT_DYN_CTL0, 665 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 666 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 667 668 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 669 } 670 671 void 672 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types) 673 { 674 /* Display supported media types */ 675 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX)) 676 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL); 677 678 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T)) 679 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL); 680 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX)) 681 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL); 682 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX)) 683 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL); 684 685 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T)) 686 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL); 687 688 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T)) 689 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL); 690 691 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) || 692 phy_types & (I40E_CAP_PHY_TYPE_XFI) || 693 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU)) 694 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); 695 696 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR)) 697 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL); 698 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR)) 699 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL); 700 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T)) 701 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL); 702 703 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) || 704 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || 705 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) || 706 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) || 707 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) 708 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL); 709 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4)) 710 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL); 711 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4)) 712 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL); 713 714 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX)) 715 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL); 716 717 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) 718 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1)) 719 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL); 720 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC)) 721 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL); 722 if (phy_types & (I40E_CAP_PHY_TYPE_SFI)) 723 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL); 724 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4)) 725 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL); 726 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR)) 727 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL); 728 729 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2)) 730 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL); 731 732 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) 733 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL); 734 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI)) 735 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); 736 737 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR)) 738 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL); 739 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR)) 740 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL); 741 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR)) 742 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL); 743 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR)) 744 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL); 745 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC)) 746 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL); 747 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC)) 748 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL); 749 } 750 751 /********************************************************************* 752 * 753 * Get Firmware Switch configuration 754 * - this will need to be more robust when more complex 755 * switch configurations are enabled. 756 * 757 **********************************************************************/ 758 int 759 ixl_switch_config(struct ixl_pf *pf) 760 { 761 struct i40e_hw *hw = &pf->hw; 762 struct ixl_vsi *vsi = &pf->vsi; 763 device_t dev = iflib_get_dev(vsi->ctx); 764 struct i40e_aqc_get_switch_config_resp *sw_config; 765 u8 aq_buf[I40E_AQ_LARGE_BUF]; 766 int ret; 767 u16 next = 0; 768 769 memset(&aq_buf, 0, sizeof(aq_buf)); 770 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 771 ret = i40e_aq_get_switch_config(hw, sw_config, 772 sizeof(aq_buf), &next, NULL); 773 if (ret) { 774 device_printf(dev, "aq_get_switch_config() failed, error %d," 775 " aq_error %d\n", ret, pf->hw.aq.asq_last_status); 776 return (ret); 777 } 778 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) { 779 device_printf(dev, 780 "Switch config: header reported: %d in structure, %d total\n", 781 LE16_TO_CPU(sw_config->header.num_reported), 782 LE16_TO_CPU(sw_config->header.num_total)); 783 for (int i = 0; 784 i < LE16_TO_CPU(sw_config->header.num_reported); i++) { 785 device_printf(dev, 786 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i, 787 sw_config->element[i].element_type, 788 LE16_TO_CPU(sw_config->element[i].seid), 789 LE16_TO_CPU(sw_config->element[i].uplink_seid), 790 LE16_TO_CPU(sw_config->element[i].downlink_seid)); 791 } 792 } 793 /* Simplified due to a single VSI */ 794 vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid); 795 vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid); 796 vsi->seid = LE16_TO_CPU(sw_config->element[0].seid); 797 return (ret); 798 } 799 800 void 801 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls) 802 { 803 struct sysctl_oid *tree; 804 struct sysctl_oid_list *child; 805 struct sysctl_oid_list *vsi_list; 806 807 tree = device_get_sysctl_tree(vsi->dev); 808 child = SYSCTL_CHILDREN(tree); 809 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name, 810 CTLFLAG_RD, NULL, "VSI Number"); 811 812 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 813 ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats); 814 815 /* Copy of netstat RX errors counter for validation purposes */ 816 SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors", 817 CTLFLAG_RD, &vsi->ierrors, 818 "RX packet errors"); 819 820 if (queues_sysctls) 821 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx); 822 } 823 824 /* 825 * Used to set the Tx ITR value for all of the PF LAN VSI's queues. 826 * Writes to the ITR registers immediately. 827 */ 828 static int 829 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS) 830 { 831 struct ixl_pf *pf = (struct ixl_pf *)arg1; 832 device_t dev = pf->dev; 833 int error = 0; 834 int requested_tx_itr; 835 836 requested_tx_itr = pf->tx_itr; 837 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); 838 if ((error) || (req->newptr == NULL)) 839 return (error); 840 if (pf->dynamic_tx_itr) { 841 device_printf(dev, 842 "Cannot set TX itr value while dynamic TX itr is enabled\n"); 843 return (EINVAL); 844 } 845 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { 846 device_printf(dev, 847 "Invalid TX itr value; value must be between 0 and %d\n", 848 IXL_MAX_ITR); 849 return (EINVAL); 850 } 851 852 pf->tx_itr = requested_tx_itr; 853 ixl_configure_tx_itr(pf); 854 855 return (error); 856 } 857 858 /* 859 * Used to set the Rx ITR value for all of the PF LAN VSI's queues. 860 * Writes to the ITR registers immediately. 861 */ 862 static int 863 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS) 864 { 865 struct ixl_pf *pf = (struct ixl_pf *)arg1; 866 device_t dev = pf->dev; 867 int error = 0; 868 int requested_rx_itr; 869 870 requested_rx_itr = pf->rx_itr; 871 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); 872 if ((error) || (req->newptr == NULL)) 873 return (error); 874 if (pf->dynamic_rx_itr) { 875 device_printf(dev, 876 "Cannot set RX itr value while dynamic RX itr is enabled\n"); 877 return (EINVAL); 878 } 879 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { 880 device_printf(dev, 881 "Invalid RX itr value; value must be between 0 and %d\n", 882 IXL_MAX_ITR); 883 return (EINVAL); 884 } 885 886 pf->rx_itr = requested_rx_itr; 887 ixl_configure_rx_itr(pf); 888 889 return (error); 890 } 891 892 void 893 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, 894 struct sysctl_oid_list *child, 895 struct i40e_hw_port_stats *stats) 896 { 897 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, 898 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics"); 899 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); 900 901 struct i40e_eth_stats *eth_stats = &stats->eth; 902 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); 903 904 struct ixl_sysctl_info ctls[] = 905 { 906 {&stats->crc_errors, "crc_errors", "CRC Errors"}, 907 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, 908 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, 909 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, 910 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"}, 911 /* Packet Reception Stats */ 912 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, 913 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, 914 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, 915 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, 916 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, 917 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, 918 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, 919 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, 920 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, 921 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, 922 {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, 923 {&stats->checksum_error, "checksum_errors", "Checksum Errors"}, 924 /* Packet Transmission Stats */ 925 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, 926 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, 927 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, 928 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, 929 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, 930 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, 931 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, 932 /* Flow control */ 933 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, 934 {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, 935 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, 936 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, 937 /* End */ 938 {0,0,0} 939 }; 940 941 struct ixl_sysctl_info *entry = ctls; 942 while (entry->stat != 0) 943 { 944 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, 945 CTLFLAG_RD, entry->stat, 946 entry->description); 947 entry++; 948 } 949 } 950 951 void 952 ixl_set_rss_key(struct ixl_pf *pf) 953 { 954 struct i40e_hw *hw = &pf->hw; 955 struct ixl_vsi *vsi = &pf->vsi; 956 device_t dev = pf->dev; 957 u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; 958 enum i40e_status_code status; 959 960 #ifdef RSS 961 /* Fetch the configured RSS key */ 962 rss_getkey((uint8_t *) &rss_seed); 963 #else 964 ixl_get_default_rss_key(rss_seed); 965 #endif 966 /* Fill out hash function seed */ 967 if (hw->mac.type == I40E_MAC_X722) { 968 struct i40e_aqc_get_set_rss_key_data key_data; 969 bcopy(rss_seed, &key_data, 52); 970 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data); 971 if (status) 972 device_printf(dev, 973 "i40e_aq_set_rss_key status %s, error %s\n", 974 i40e_stat_str(hw, status), 975 i40e_aq_str(hw, hw->aq.asq_last_status)); 976 } else { 977 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) 978 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]); 979 } 980 } 981 982 /* 983 * Configure enabled PCTYPES for RSS. 984 */ 985 void 986 ixl_set_rss_pctypes(struct ixl_pf *pf) 987 { 988 struct i40e_hw *hw = &pf->hw; 989 u64 set_hena = 0, hena; 990 991 #ifdef RSS 992 u32 rss_hash_config; 993 994 rss_hash_config = rss_gethashconfig(); 995 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 996 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); 997 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 998 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); 999 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 1000 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); 1001 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 1002 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); 1003 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 1004 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); 1005 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 1006 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); 1007 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 1008 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); 1009 #else 1010 if (hw->mac.type == I40E_MAC_X722) 1011 set_hena = IXL_DEFAULT_RSS_HENA_X722; 1012 else 1013 set_hena = IXL_DEFAULT_RSS_HENA_XL710; 1014 #endif 1015 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 1016 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 1017 hena |= set_hena; 1018 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); 1019 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 1020 1021 } 1022 1023 /* 1024 ** Setup the PF's RSS parameters. 1025 */ 1026 void 1027 ixl_config_rss(struct ixl_pf *pf) 1028 { 1029 ixl_set_rss_key(pf); 1030 ixl_set_rss_pctypes(pf); 1031 ixl_set_rss_hlut(pf); 1032 } 1033 1034 /* 1035 * In some firmware versions there is default MAC/VLAN filter 1036 * configured which interferes with filters managed by driver. 1037 * Make sure it's removed. 1038 */ 1039 void 1040 ixl_del_default_hw_filters(struct ixl_vsi *vsi) 1041 { 1042 struct i40e_aqc_remove_macvlan_element_data e; 1043 1044 bzero(&e, sizeof(e)); 1045 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); 1046 e.vlan_tag = 0; 1047 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 1048 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); 1049 1050 bzero(&e, sizeof(e)); 1051 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); 1052 e.vlan_tag = 0; 1053 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | 1054 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 1055 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); 1056 } 1057 1058 /* 1059 ** Initialize filter list and add filters that the hardware 1060 ** needs to know about. 1061 ** 1062 ** Requires VSI's seid to be set before calling. 1063 */ 1064 void 1065 ixl_init_filters(struct ixl_vsi *vsi) 1066 { 1067 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 1068 1069 ixl_dbg_filter(pf, "%s: start\n", __func__); 1070 1071 /* Initialize mac filter list for VSI */ 1072 LIST_INIT(&vsi->ftl); 1073 vsi->num_hw_filters = 0; 1074 1075 /* Receive broadcast Ethernet frames */ 1076 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL); 1077 1078 if (IXL_VSI_IS_VF(vsi)) 1079 return; 1080 1081 ixl_del_default_hw_filters(vsi); 1082 1083 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY); 1084 1085 /* 1086 * Prevent Tx flow control frames from being sent out by 1087 * non-firmware transmitters. 1088 * This affects every VSI in the PF. 1089 */ 1090 #ifndef IXL_DEBUG_FC 1091 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); 1092 #else 1093 if (pf->enable_tx_fc_filter) 1094 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); 1095 #endif 1096 } 1097 1098 void 1099 ixl_reconfigure_filters(struct ixl_vsi *vsi) 1100 { 1101 struct i40e_hw *hw = vsi->hw; 1102 struct ixl_ftl_head tmp; 1103 int cnt; 1104 1105 /* 1106 * The ixl_add_hw_filters function adds filters configured 1107 * in HW to a list in VSI. Move all filters to a temporary 1108 * list to avoid corrupting it by concatenating to itself. 1109 */ 1110 LIST_INIT(&tmp); 1111 LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle); 1112 cnt = vsi->num_hw_filters; 1113 vsi->num_hw_filters = 0; 1114 1115 ixl_add_hw_filters(vsi, &tmp, cnt); 1116 1117 /* 1118 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp 1119 * will be NULL. Furthermore, the ftl of such vsi already contains 1120 * IXL_VLAN_ANY filter so we can skip that as well. 1121 */ 1122 if (hw == NULL) 1123 return; 1124 1125 /* Filter could be removed if MAC address was changed */ 1126 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); 1127 1128 if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0) 1129 return; 1130 /* 1131 * VLAN HW filtering is enabled, make sure that filters 1132 * for all registered VLAN tags are configured 1133 */ 1134 ixl_add_vlan_filters(vsi, hw->mac.addr); 1135 } 1136 1137 /* 1138 * This routine adds a MAC/VLAN filter to the software filter 1139 * list, then adds that new filter to the HW if it doesn't already 1140 * exist in the SW filter list. 1141 */ 1142 void 1143 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 1144 { 1145 struct ixl_mac_filter *f, *tmp; 1146 struct ixl_pf *pf; 1147 device_t dev; 1148 struct ixl_ftl_head to_add; 1149 int to_add_cnt; 1150 1151 pf = vsi->back; 1152 dev = pf->dev; 1153 to_add_cnt = 1; 1154 1155 ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n", 1156 MAC_FORMAT_ARGS(macaddr), vlan); 1157 1158 /* Does one already exist */ 1159 f = ixl_find_filter(&vsi->ftl, macaddr, vlan); 1160 if (f != NULL) 1161 return; 1162 1163 LIST_INIT(&to_add); 1164 f = ixl_new_filter(&to_add, macaddr, vlan); 1165 if (f == NULL) { 1166 device_printf(dev, "WARNING: no filter available!!\n"); 1167 return; 1168 } 1169 if (f->vlan != IXL_VLAN_ANY) 1170 f->flags |= IXL_FILTER_VLAN; 1171 else 1172 vsi->num_macs++; 1173 1174 /* 1175 ** Is this the first vlan being registered, if so we 1176 ** need to remove the ANY filter that indicates we are 1177 ** not in a vlan, and replace that with a 0 filter. 1178 */ 1179 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { 1180 tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY); 1181 if (tmp != NULL) { 1182 struct ixl_ftl_head to_del; 1183 1184 /* Prepare new filter first to avoid removing 1185 * VLAN_ANY filter if allocation fails */ 1186 f = ixl_new_filter(&to_add, macaddr, 0); 1187 if (f == NULL) { 1188 device_printf(dev, "WARNING: no filter available!!\n"); 1189 free(LIST_FIRST(&to_add), M_IXL); 1190 return; 1191 } 1192 to_add_cnt++; 1193 1194 LIST_REMOVE(tmp, ftle); 1195 LIST_INIT(&to_del); 1196 LIST_INSERT_HEAD(&to_del, tmp, ftle); 1197 ixl_del_hw_filters(vsi, &to_del, 1); 1198 } 1199 } 1200 1201 ixl_add_hw_filters(vsi, &to_add, to_add_cnt); 1202 } 1203 1204 /** 1205 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs 1206 * @vsi: pointer to VSI 1207 * @macaddr: MAC address 1208 * 1209 * Adds MAC/VLAN filter for each VLAN configured on the interface 1210 * if there is enough HW filters. Otherwise adds a single filter 1211 * for all tagged and untagged frames to allow all configured VLANs 1212 * to recieve traffic. 1213 */ 1214 void 1215 ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr) 1216 { 1217 struct ixl_ftl_head to_add; 1218 struct ixl_mac_filter *f; 1219 int to_add_cnt = 0; 1220 int i, vlan = 0; 1221 1222 if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) { 1223 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); 1224 return; 1225 } 1226 LIST_INIT(&to_add); 1227 1228 /* Add filter for untagged frames if it does not exist yet */ 1229 f = ixl_find_filter(&vsi->ftl, macaddr, 0); 1230 if (f == NULL) { 1231 f = ixl_new_filter(&to_add, macaddr, 0); 1232 if (f == NULL) { 1233 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 1234 return; 1235 } 1236 to_add_cnt++; 1237 } 1238 1239 for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) { 1240 bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan); 1241 if (vlan == -1) 1242 break; 1243 1244 /* Does one already exist */ 1245 f = ixl_find_filter(&vsi->ftl, macaddr, vlan); 1246 if (f != NULL) 1247 continue; 1248 1249 f = ixl_new_filter(&to_add, macaddr, vlan); 1250 if (f == NULL) { 1251 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 1252 ixl_free_filters(&to_add); 1253 return; 1254 } 1255 to_add_cnt++; 1256 } 1257 1258 ixl_add_hw_filters(vsi, &to_add, to_add_cnt); 1259 } 1260 1261 void 1262 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 1263 { 1264 struct ixl_mac_filter *f, *tmp; 1265 struct ixl_ftl_head ftl_head; 1266 int to_del_cnt = 1; 1267 1268 ixl_dbg_filter((struct ixl_pf *)vsi->back, 1269 "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n", 1270 MAC_FORMAT_ARGS(macaddr), vlan); 1271 1272 f = ixl_find_filter(&vsi->ftl, macaddr, vlan); 1273 if (f == NULL) 1274 return; 1275 1276 LIST_REMOVE(f, ftle); 1277 LIST_INIT(&ftl_head); 1278 LIST_INSERT_HEAD(&ftl_head, f, ftle); 1279 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0) 1280 vsi->num_macs--; 1281 1282 /* If this is not the last vlan just remove the filter */ 1283 if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) { 1284 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt); 1285 return; 1286 } 1287 1288 /* It's the last vlan, we need to switch back to a non-vlan filter */ 1289 tmp = ixl_find_filter(&vsi->ftl, macaddr, 0); 1290 if (tmp != NULL) { 1291 LIST_REMOVE(tmp, ftle); 1292 LIST_INSERT_AFTER(f, tmp, ftle); 1293 to_del_cnt++; 1294 } 1295 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt); 1296 1297 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); 1298 } 1299 1300 /** 1301 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC 1302 * @vsi: VSI which filters need to be removed 1303 * @macaddr: MAC address 1304 * 1305 * Remove all MAC/VLAN filters with a given MAC address. For multicast 1306 * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY) 1307 * so skip them to speed up processing. Those filters should be removed 1308 * using ixl_del_filter function. 1309 */ 1310 void 1311 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr) 1312 { 1313 struct ixl_mac_filter *f, *tmp; 1314 struct ixl_ftl_head to_del; 1315 int to_del_cnt = 0; 1316 1317 LIST_INIT(&to_del); 1318 1319 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) { 1320 if ((f->flags & IXL_FILTER_MC) != 0 || 1321 !ixl_ether_is_equal(f->macaddr, macaddr)) 1322 continue; 1323 1324 LIST_REMOVE(f, ftle); 1325 LIST_INSERT_HEAD(&to_del, f, ftle); 1326 to_del_cnt++; 1327 } 1328 1329 ixl_dbg_filter((struct ixl_pf *)vsi->back, 1330 "%s: " MAC_FORMAT ", to_del_cnt: %d\n", 1331 __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt); 1332 if (to_del_cnt > 0) 1333 ixl_del_hw_filters(vsi, &to_del, to_del_cnt); 1334 } 1335 1336 /* 1337 ** Find the filter with both matching mac addr and vlan id 1338 */ 1339 struct ixl_mac_filter * 1340 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan) 1341 { 1342 struct ixl_mac_filter *f; 1343 1344 LIST_FOREACH(f, headp, ftle) { 1345 if (ixl_ether_is_equal(f->macaddr, macaddr) && 1346 (f->vlan == vlan)) { 1347 return (f); 1348 } 1349 } 1350 1351 return (NULL); 1352 } 1353 1354 /* 1355 ** This routine takes additions to the vsi filter 1356 ** table and creates an Admin Queue call to create 1357 ** the filters in the hardware. 1358 */ 1359 void 1360 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt) 1361 { 1362 struct i40e_aqc_add_macvlan_element_data *a, *b; 1363 struct ixl_mac_filter *f, *fn; 1364 struct ixl_pf *pf; 1365 struct i40e_hw *hw; 1366 device_t dev; 1367 enum i40e_status_code status; 1368 int j = 0; 1369 1370 pf = vsi->back; 1371 dev = vsi->dev; 1372 hw = &pf->hw; 1373 1374 ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt); 1375 1376 if (cnt < 1) { 1377 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n"); 1378 return; 1379 } 1380 1381 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, 1382 M_IXL, M_NOWAIT | M_ZERO); 1383 if (a == NULL) { 1384 device_printf(dev, "add_hw_filters failed to get memory\n"); 1385 return; 1386 } 1387 1388 LIST_FOREACH(f, to_add, ftle) { 1389 b = &a[j]; // a pox on fvl long names :) 1390 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); 1391 if (f->vlan == IXL_VLAN_ANY) { 1392 b->vlan_tag = 0; 1393 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; 1394 } else { 1395 b->vlan_tag = f->vlan; 1396 b->flags = 0; 1397 } 1398 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 1399 /* Some FW versions do not set match method 1400 * when adding filters fails. Initialize it with 1401 * expected error value to allow detection which 1402 * filters were not added */ 1403 b->match_method = I40E_AQC_MM_ERR_NO_RES; 1404 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n", 1405 MAC_FORMAT_ARGS(f->macaddr)); 1406 1407 if (++j == cnt) 1408 break; 1409 } 1410 if (j != cnt) { 1411 /* Something went wrong */ 1412 device_printf(dev, 1413 "%s ERROR: list of filters to short expected: %d, found: %d\n", 1414 __func__, cnt, j); 1415 ixl_free_filters(to_add); 1416 goto out_free; 1417 } 1418 1419 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); 1420 if (status == I40E_SUCCESS) { 1421 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle); 1422 vsi->num_hw_filters += j; 1423 goto out_free; 1424 } 1425 1426 device_printf(dev, 1427 "i40e_aq_add_macvlan status %s, error %s\n", 1428 i40e_stat_str(hw, status), 1429 i40e_aq_str(hw, hw->aq.asq_last_status)); 1430 j = 0; 1431 1432 /* Verify which filters were actually configured in HW 1433 * and add them to the list */ 1434 LIST_FOREACH_SAFE(f, to_add, ftle, fn) { 1435 LIST_REMOVE(f, ftle); 1436 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) { 1437 ixl_dbg_filter(pf, 1438 "%s filter " MAC_FORMAT " VTAG: %d not added\n", 1439 __func__, 1440 MAC_FORMAT_ARGS(f->macaddr), 1441 f->vlan); 1442 free(f, M_IXL); 1443 } else { 1444 LIST_INSERT_HEAD(&vsi->ftl, f, ftle); 1445 vsi->num_hw_filters++; 1446 } 1447 j++; 1448 } 1449 1450 out_free: 1451 free(a, M_IXL); 1452 } 1453 1454 /* 1455 ** This routine takes removals in the vsi filter 1456 ** table and creates an Admin Queue call to delete 1457 ** the filters in the hardware. 1458 */ 1459 void 1460 ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt) 1461 { 1462 struct i40e_aqc_remove_macvlan_element_data *d, *e; 1463 struct ixl_pf *pf; 1464 struct i40e_hw *hw; 1465 device_t dev; 1466 struct ixl_mac_filter *f, *f_temp; 1467 enum i40e_status_code status; 1468 int j = 0; 1469 1470 pf = vsi->back; 1471 hw = &pf->hw; 1472 dev = vsi->dev; 1473 1474 ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt); 1475 1476 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, 1477 M_IXL, M_NOWAIT | M_ZERO); 1478 if (d == NULL) { 1479 device_printf(dev, "%s: failed to get memory\n", __func__); 1480 return; 1481 } 1482 1483 LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) { 1484 e = &d[j]; // a pox on fvl long names :) 1485 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); 1486 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 1487 if (f->vlan == IXL_VLAN_ANY) { 1488 e->vlan_tag = 0; 1489 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 1490 } else { 1491 e->vlan_tag = f->vlan; 1492 } 1493 1494 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n", 1495 MAC_FORMAT_ARGS(f->macaddr)); 1496 1497 /* delete entry from the list */ 1498 LIST_REMOVE(f, ftle); 1499 free(f, M_IXL); 1500 if (++j == cnt) 1501 break; 1502 } 1503 if (j != cnt || !LIST_EMPTY(to_del)) { 1504 /* Something went wrong */ 1505 device_printf(dev, 1506 "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n", 1507 __func__, cnt, j); 1508 ixl_free_filters(to_del); 1509 goto out_free; 1510 } 1511 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); 1512 if (status) { 1513 device_printf(dev, 1514 "%s: i40e_aq_remove_macvlan status %s, error %s\n", 1515 __func__, i40e_stat_str(hw, status), 1516 i40e_aq_str(hw, hw->aq.asq_last_status)); 1517 for (int i = 0; i < j; i++) { 1518 if (d[i].error_code == 0) 1519 continue; 1520 device_printf(dev, 1521 "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n", 1522 __func__, MAC_FORMAT_ARGS(d[i].mac_addr), 1523 d[i].vlan_tag); 1524 } 1525 } 1526 1527 vsi->num_hw_filters -= j; 1528 1529 out_free: 1530 free(d, M_IXL); 1531 1532 ixl_dbg_filter(pf, "%s: end\n", __func__); 1533 } 1534 1535 int 1536 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1537 { 1538 struct i40e_hw *hw = &pf->hw; 1539 int error = 0; 1540 u32 reg; 1541 u16 pf_qidx; 1542 1543 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1544 1545 ixl_dbg(pf, IXL_DBG_EN_DIS, 1546 "Enabling PF TX ring %4d / VSI TX ring %4d...\n", 1547 pf_qidx, vsi_qidx); 1548 1549 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE); 1550 1551 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1552 reg |= I40E_QTX_ENA_QENA_REQ_MASK | 1553 I40E_QTX_ENA_QENA_STAT_MASK; 1554 wr32(hw, I40E_QTX_ENA(pf_qidx), reg); 1555 /* Verify the enable took */ 1556 for (int j = 0; j < 10; j++) { 1557 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1558 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) 1559 break; 1560 i40e_usec_delay(10); 1561 } 1562 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { 1563 device_printf(pf->dev, "TX queue %d still disabled!\n", 1564 pf_qidx); 1565 error = ETIMEDOUT; 1566 } 1567 1568 return (error); 1569 } 1570 1571 int 1572 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1573 { 1574 struct i40e_hw *hw = &pf->hw; 1575 int error = 0; 1576 u32 reg; 1577 u16 pf_qidx; 1578 1579 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1580 1581 ixl_dbg(pf, IXL_DBG_EN_DIS, 1582 "Enabling PF RX ring %4d / VSI RX ring %4d...\n", 1583 pf_qidx, vsi_qidx); 1584 1585 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1586 reg |= I40E_QRX_ENA_QENA_REQ_MASK | 1587 I40E_QRX_ENA_QENA_STAT_MASK; 1588 wr32(hw, I40E_QRX_ENA(pf_qidx), reg); 1589 /* Verify the enable took */ 1590 for (int j = 0; j < 10; j++) { 1591 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1592 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) 1593 break; 1594 i40e_usec_delay(10); 1595 } 1596 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { 1597 device_printf(pf->dev, "RX queue %d still disabled!\n", 1598 pf_qidx); 1599 error = ETIMEDOUT; 1600 } 1601 1602 return (error); 1603 } 1604 1605 int 1606 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1607 { 1608 int error = 0; 1609 1610 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx); 1611 /* Called function already prints error message */ 1612 if (error) 1613 return (error); 1614 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx); 1615 return (error); 1616 } 1617 1618 /* 1619 * Returns error on first ring that is detected hung. 1620 */ 1621 int 1622 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1623 { 1624 struct i40e_hw *hw = &pf->hw; 1625 int error = 0; 1626 u32 reg; 1627 u16 pf_qidx; 1628 1629 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1630 1631 ixl_dbg(pf, IXL_DBG_EN_DIS, 1632 "Disabling PF TX ring %4d / VSI TX ring %4d...\n", 1633 pf_qidx, vsi_qidx); 1634 1635 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE); 1636 i40e_usec_delay(500); 1637 1638 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1639 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 1640 wr32(hw, I40E_QTX_ENA(pf_qidx), reg); 1641 /* Verify the disable took */ 1642 for (int j = 0; j < 10; j++) { 1643 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1644 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) 1645 break; 1646 i40e_msec_delay(10); 1647 } 1648 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { 1649 device_printf(pf->dev, "TX queue %d still enabled!\n", 1650 pf_qidx); 1651 error = ETIMEDOUT; 1652 } 1653 1654 return (error); 1655 } 1656 1657 /* 1658 * Returns error on first ring that is detected hung. 1659 */ 1660 int 1661 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1662 { 1663 struct i40e_hw *hw = &pf->hw; 1664 int error = 0; 1665 u32 reg; 1666 u16 pf_qidx; 1667 1668 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1669 1670 ixl_dbg(pf, IXL_DBG_EN_DIS, 1671 "Disabling PF RX ring %4d / VSI RX ring %4d...\n", 1672 pf_qidx, vsi_qidx); 1673 1674 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1675 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; 1676 wr32(hw, I40E_QRX_ENA(pf_qidx), reg); 1677 /* Verify the disable took */ 1678 for (int j = 0; j < 10; j++) { 1679 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1680 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) 1681 break; 1682 i40e_msec_delay(10); 1683 } 1684 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { 1685 device_printf(pf->dev, "RX queue %d still enabled!\n", 1686 pf_qidx); 1687 error = ETIMEDOUT; 1688 } 1689 1690 return (error); 1691 } 1692 1693 int 1694 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1695 { 1696 int error = 0; 1697 1698 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx); 1699 /* Called function already prints error message */ 1700 if (error) 1701 return (error); 1702 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx); 1703 return (error); 1704 } 1705 1706 static void 1707 ixl_handle_tx_mdd_event(struct ixl_pf *pf) 1708 { 1709 struct i40e_hw *hw = &pf->hw; 1710 device_t dev = pf->dev; 1711 struct ixl_vf *vf; 1712 bool mdd_detected = false; 1713 bool pf_mdd_detected = false; 1714 bool vf_mdd_detected = false; 1715 u16 vf_num, queue; 1716 u8 pf_num, event; 1717 u8 pf_mdet_num, vp_mdet_num; 1718 u32 reg; 1719 1720 /* find what triggered the MDD event */ 1721 reg = rd32(hw, I40E_GL_MDET_TX); 1722 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 1723 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 1724 I40E_GL_MDET_TX_PF_NUM_SHIFT; 1725 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 1726 I40E_GL_MDET_TX_VF_NUM_SHIFT; 1727 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 1728 I40E_GL_MDET_TX_EVENT_SHIFT; 1729 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 1730 I40E_GL_MDET_TX_QUEUE_SHIFT; 1731 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 1732 mdd_detected = true; 1733 } 1734 1735 if (!mdd_detected) 1736 return; 1737 1738 reg = rd32(hw, I40E_PF_MDET_TX); 1739 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 1740 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 1741 pf_mdet_num = hw->pf_id; 1742 pf_mdd_detected = true; 1743 } 1744 1745 /* Check if MDD was caused by a VF */ 1746 for (int i = 0; i < pf->num_vfs; i++) { 1747 vf = &(pf->vfs[i]); 1748 reg = rd32(hw, I40E_VP_MDET_TX(i)); 1749 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 1750 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 1751 vp_mdet_num = i; 1752 vf->num_mdd_events++; 1753 vf_mdd_detected = true; 1754 } 1755 } 1756 1757 /* Print out an error message */ 1758 if (vf_mdd_detected && pf_mdd_detected) 1759 device_printf(dev, 1760 "Malicious Driver Detection event %d" 1761 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n", 1762 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num); 1763 else if (vf_mdd_detected && !pf_mdd_detected) 1764 device_printf(dev, 1765 "Malicious Driver Detection event %d" 1766 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n", 1767 event, queue, pf_num, vf_num, vp_mdet_num); 1768 else if (!vf_mdd_detected && pf_mdd_detected) 1769 device_printf(dev, 1770 "Malicious Driver Detection event %d" 1771 " on TX queue %d, pf number %d (PF-%d)\n", 1772 event, queue, pf_num, pf_mdet_num); 1773 /* Theoretically shouldn't happen */ 1774 else 1775 device_printf(dev, 1776 "TX Malicious Driver Detection event (unknown)\n"); 1777 } 1778 1779 static void 1780 ixl_handle_rx_mdd_event(struct ixl_pf *pf) 1781 { 1782 struct i40e_hw *hw = &pf->hw; 1783 device_t dev = pf->dev; 1784 struct ixl_vf *vf; 1785 bool mdd_detected = false; 1786 bool pf_mdd_detected = false; 1787 bool vf_mdd_detected = false; 1788 u16 queue; 1789 u8 pf_num, event; 1790 u8 pf_mdet_num, vp_mdet_num; 1791 u32 reg; 1792 1793 /* 1794 * GL_MDET_RX doesn't contain VF number information, unlike 1795 * GL_MDET_TX. 1796 */ 1797 reg = rd32(hw, I40E_GL_MDET_RX); 1798 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 1799 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 1800 I40E_GL_MDET_RX_FUNCTION_SHIFT; 1801 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 1802 I40E_GL_MDET_RX_EVENT_SHIFT; 1803 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 1804 I40E_GL_MDET_RX_QUEUE_SHIFT; 1805 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 1806 mdd_detected = true; 1807 } 1808 1809 if (!mdd_detected) 1810 return; 1811 1812 reg = rd32(hw, I40E_PF_MDET_RX); 1813 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 1814 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 1815 pf_mdet_num = hw->pf_id; 1816 pf_mdd_detected = true; 1817 } 1818 1819 /* Check if MDD was caused by a VF */ 1820 for (int i = 0; i < pf->num_vfs; i++) { 1821 vf = &(pf->vfs[i]); 1822 reg = rd32(hw, I40E_VP_MDET_RX(i)); 1823 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 1824 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 1825 vp_mdet_num = i; 1826 vf->num_mdd_events++; 1827 vf_mdd_detected = true; 1828 } 1829 } 1830 1831 /* Print out an error message */ 1832 if (vf_mdd_detected && pf_mdd_detected) 1833 device_printf(dev, 1834 "Malicious Driver Detection event %d" 1835 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n", 1836 event, queue, pf_num, pf_mdet_num, vp_mdet_num); 1837 else if (vf_mdd_detected && !pf_mdd_detected) 1838 device_printf(dev, 1839 "Malicious Driver Detection event %d" 1840 " on RX queue %d, pf number %d, (VF-%d)\n", 1841 event, queue, pf_num, vp_mdet_num); 1842 else if (!vf_mdd_detected && pf_mdd_detected) 1843 device_printf(dev, 1844 "Malicious Driver Detection event %d" 1845 " on RX queue %d, pf number %d (PF-%d)\n", 1846 event, queue, pf_num, pf_mdet_num); 1847 /* Theoretically shouldn't happen */ 1848 else 1849 device_printf(dev, 1850 "RX Malicious Driver Detection event (unknown)\n"); 1851 } 1852 1853 /** 1854 * ixl_handle_mdd_event 1855 * 1856 * Called from interrupt handler to identify possibly malicious vfs 1857 * (But also detects events from the PF, as well) 1858 **/ 1859 void 1860 ixl_handle_mdd_event(struct ixl_pf *pf) 1861 { 1862 struct i40e_hw *hw = &pf->hw; 1863 u32 reg; 1864 1865 /* 1866 * Handle both TX/RX because it's possible they could 1867 * both trigger in the same interrupt. 1868 */ 1869 ixl_handle_tx_mdd_event(pf); 1870 ixl_handle_rx_mdd_event(pf); 1871 1872 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING); 1873 1874 /* re-enable mdd interrupt cause */ 1875 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 1876 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 1877 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 1878 ixl_flush(hw); 1879 } 1880 1881 void 1882 ixl_enable_intr0(struct i40e_hw *hw) 1883 { 1884 u32 reg; 1885 1886 /* Use IXL_ITR_NONE so ITR isn't updated here */ 1887 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | 1888 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 1889 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 1890 wr32(hw, I40E_PFINT_DYN_CTL0, reg); 1891 } 1892 1893 void 1894 ixl_disable_intr0(struct i40e_hw *hw) 1895 { 1896 u32 reg; 1897 1898 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; 1899 wr32(hw, I40E_PFINT_DYN_CTL0, reg); 1900 ixl_flush(hw); 1901 } 1902 1903 void 1904 ixl_enable_queue(struct i40e_hw *hw, int id) 1905 { 1906 u32 reg; 1907 1908 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | 1909 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 1910 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 1911 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); 1912 } 1913 1914 void 1915 ixl_disable_queue(struct i40e_hw *hw, int id) 1916 { 1917 u32 reg; 1918 1919 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 1920 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); 1921 } 1922 1923 void 1924 ixl_handle_empr_reset(struct ixl_pf *pf) 1925 { 1926 struct ixl_vsi *vsi = &pf->vsi; 1927 bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING); 1928 1929 ixl_prepare_for_reset(pf, is_up); 1930 /* 1931 * i40e_pf_reset checks the type of reset and acts 1932 * accordingly. If EMP or Core reset was performed 1933 * doing PF reset is not necessary and it sometimes 1934 * fails. 1935 */ 1936 ixl_pf_reset(pf); 1937 1938 if (!IXL_PF_IN_RECOVERY_MODE(pf) && 1939 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) { 1940 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); 1941 device_printf(pf->dev, 1942 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 1943 pf->link_up = FALSE; 1944 ixl_update_link_status(pf); 1945 } 1946 1947 ixl_rebuild_hw_structs_after_reset(pf, is_up); 1948 1949 atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING); 1950 } 1951 1952 void 1953 ixl_update_stats_counters(struct ixl_pf *pf) 1954 { 1955 struct i40e_hw *hw = &pf->hw; 1956 struct ixl_vsi *vsi = &pf->vsi; 1957 struct ixl_vf *vf; 1958 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx; 1959 1960 struct i40e_hw_port_stats *nsd = &pf->stats; 1961 struct i40e_hw_port_stats *osd = &pf->stats_offsets; 1962 1963 /* Update hw stats */ 1964 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), 1965 pf->stat_offsets_loaded, 1966 &osd->crc_errors, &nsd->crc_errors); 1967 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), 1968 pf->stat_offsets_loaded, 1969 &osd->illegal_bytes, &nsd->illegal_bytes); 1970 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), 1971 I40E_GLPRT_GORCL(hw->port), 1972 pf->stat_offsets_loaded, 1973 &osd->eth.rx_bytes, &nsd->eth.rx_bytes); 1974 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), 1975 I40E_GLPRT_GOTCL(hw->port), 1976 pf->stat_offsets_loaded, 1977 &osd->eth.tx_bytes, &nsd->eth.tx_bytes); 1978 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), 1979 pf->stat_offsets_loaded, 1980 &osd->eth.rx_discards, 1981 &nsd->eth.rx_discards); 1982 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), 1983 I40E_GLPRT_UPRCL(hw->port), 1984 pf->stat_offsets_loaded, 1985 &osd->eth.rx_unicast, 1986 &nsd->eth.rx_unicast); 1987 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), 1988 I40E_GLPRT_UPTCL(hw->port), 1989 pf->stat_offsets_loaded, 1990 &osd->eth.tx_unicast, 1991 &nsd->eth.tx_unicast); 1992 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), 1993 I40E_GLPRT_MPRCL(hw->port), 1994 pf->stat_offsets_loaded, 1995 &osd->eth.rx_multicast, 1996 &nsd->eth.rx_multicast); 1997 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), 1998 I40E_GLPRT_MPTCL(hw->port), 1999 pf->stat_offsets_loaded, 2000 &osd->eth.tx_multicast, 2001 &nsd->eth.tx_multicast); 2002 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), 2003 I40E_GLPRT_BPRCL(hw->port), 2004 pf->stat_offsets_loaded, 2005 &osd->eth.rx_broadcast, 2006 &nsd->eth.rx_broadcast); 2007 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), 2008 I40E_GLPRT_BPTCL(hw->port), 2009 pf->stat_offsets_loaded, 2010 &osd->eth.tx_broadcast, 2011 &nsd->eth.tx_broadcast); 2012 2013 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), 2014 pf->stat_offsets_loaded, 2015 &osd->tx_dropped_link_down, 2016 &nsd->tx_dropped_link_down); 2017 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), 2018 pf->stat_offsets_loaded, 2019 &osd->mac_local_faults, 2020 &nsd->mac_local_faults); 2021 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), 2022 pf->stat_offsets_loaded, 2023 &osd->mac_remote_faults, 2024 &nsd->mac_remote_faults); 2025 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), 2026 pf->stat_offsets_loaded, 2027 &osd->rx_length_errors, 2028 &nsd->rx_length_errors); 2029 2030 /* Flow control (LFC) stats */ 2031 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), 2032 pf->stat_offsets_loaded, 2033 &osd->link_xon_rx, &nsd->link_xon_rx); 2034 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), 2035 pf->stat_offsets_loaded, 2036 &osd->link_xon_tx, &nsd->link_xon_tx); 2037 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), 2038 pf->stat_offsets_loaded, 2039 &osd->link_xoff_rx, &nsd->link_xoff_rx); 2040 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), 2041 pf->stat_offsets_loaded, 2042 &osd->link_xoff_tx, &nsd->link_xoff_tx); 2043 2044 /* 2045 * For watchdog management we need to know if we have been paused 2046 * during the last interval, so capture that here. 2047 */ 2048 if (pf->stats.link_xoff_rx != prev_link_xoff_rx) 2049 vsi->shared->isc_pause_frames = 1; 2050 2051 /* Packet size stats rx */ 2052 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 2053 I40E_GLPRT_PRC64L(hw->port), 2054 pf->stat_offsets_loaded, 2055 &osd->rx_size_64, &nsd->rx_size_64); 2056 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), 2057 I40E_GLPRT_PRC127L(hw->port), 2058 pf->stat_offsets_loaded, 2059 &osd->rx_size_127, &nsd->rx_size_127); 2060 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), 2061 I40E_GLPRT_PRC255L(hw->port), 2062 pf->stat_offsets_loaded, 2063 &osd->rx_size_255, &nsd->rx_size_255); 2064 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), 2065 I40E_GLPRT_PRC511L(hw->port), 2066 pf->stat_offsets_loaded, 2067 &osd->rx_size_511, &nsd->rx_size_511); 2068 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), 2069 I40E_GLPRT_PRC1023L(hw->port), 2070 pf->stat_offsets_loaded, 2071 &osd->rx_size_1023, &nsd->rx_size_1023); 2072 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), 2073 I40E_GLPRT_PRC1522L(hw->port), 2074 pf->stat_offsets_loaded, 2075 &osd->rx_size_1522, &nsd->rx_size_1522); 2076 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), 2077 I40E_GLPRT_PRC9522L(hw->port), 2078 pf->stat_offsets_loaded, 2079 &osd->rx_size_big, &nsd->rx_size_big); 2080 2081 /* Packet size stats tx */ 2082 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), 2083 I40E_GLPRT_PTC64L(hw->port), 2084 pf->stat_offsets_loaded, 2085 &osd->tx_size_64, &nsd->tx_size_64); 2086 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), 2087 I40E_GLPRT_PTC127L(hw->port), 2088 pf->stat_offsets_loaded, 2089 &osd->tx_size_127, &nsd->tx_size_127); 2090 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), 2091 I40E_GLPRT_PTC255L(hw->port), 2092 pf->stat_offsets_loaded, 2093 &osd->tx_size_255, &nsd->tx_size_255); 2094 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), 2095 I40E_GLPRT_PTC511L(hw->port), 2096 pf->stat_offsets_loaded, 2097 &osd->tx_size_511, &nsd->tx_size_511); 2098 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), 2099 I40E_GLPRT_PTC1023L(hw->port), 2100 pf->stat_offsets_loaded, 2101 &osd->tx_size_1023, &nsd->tx_size_1023); 2102 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), 2103 I40E_GLPRT_PTC1522L(hw->port), 2104 pf->stat_offsets_loaded, 2105 &osd->tx_size_1522, &nsd->tx_size_1522); 2106 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), 2107 I40E_GLPRT_PTC9522L(hw->port), 2108 pf->stat_offsets_loaded, 2109 &osd->tx_size_big, &nsd->tx_size_big); 2110 2111 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), 2112 pf->stat_offsets_loaded, 2113 &osd->rx_undersize, &nsd->rx_undersize); 2114 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), 2115 pf->stat_offsets_loaded, 2116 &osd->rx_fragments, &nsd->rx_fragments); 2117 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), 2118 pf->stat_offsets_loaded, 2119 &osd->rx_oversize, &nsd->rx_oversize); 2120 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 2121 pf->stat_offsets_loaded, 2122 &osd->rx_jabber, &nsd->rx_jabber); 2123 /* EEE */ 2124 i40e_get_phy_lpi_status(hw, nsd); 2125 2126 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded, 2127 &osd->tx_lpi_count, &nsd->tx_lpi_count, 2128 &osd->rx_lpi_count, &nsd->rx_lpi_count); 2129 2130 pf->stat_offsets_loaded = true; 2131 /* End hw stats */ 2132 2133 /* Update vsi stats */ 2134 ixl_update_vsi_stats(vsi); 2135 2136 for (int i = 0; i < pf->num_vfs; i++) { 2137 vf = &pf->vfs[i]; 2138 if (vf->vf_flags & VF_FLAG_ENABLED) 2139 ixl_update_eth_stats(&pf->vfs[i].vsi); 2140 } 2141 } 2142 2143 /** 2144 * Update VSI-specific ethernet statistics counters. 2145 **/ 2146 void 2147 ixl_update_eth_stats(struct ixl_vsi *vsi) 2148 { 2149 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 2150 struct i40e_hw *hw = &pf->hw; 2151 struct i40e_eth_stats *es; 2152 struct i40e_eth_stats *oes; 2153 u16 stat_idx = vsi->info.stat_counter_idx; 2154 2155 es = &vsi->eth_stats; 2156 oes = &vsi->eth_stats_offsets; 2157 2158 /* Gather up the stats that the hw collects */ 2159 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), 2160 vsi->stat_offsets_loaded, 2161 &oes->tx_errors, &es->tx_errors); 2162 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 2163 vsi->stat_offsets_loaded, 2164 &oes->rx_discards, &es->rx_discards); 2165 2166 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), 2167 I40E_GLV_GORCL(stat_idx), 2168 vsi->stat_offsets_loaded, 2169 &oes->rx_bytes, &es->rx_bytes); 2170 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), 2171 I40E_GLV_UPRCL(stat_idx), 2172 vsi->stat_offsets_loaded, 2173 &oes->rx_unicast, &es->rx_unicast); 2174 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), 2175 I40E_GLV_MPRCL(stat_idx), 2176 vsi->stat_offsets_loaded, 2177 &oes->rx_multicast, &es->rx_multicast); 2178 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), 2179 I40E_GLV_BPRCL(stat_idx), 2180 vsi->stat_offsets_loaded, 2181 &oes->rx_broadcast, &es->rx_broadcast); 2182 2183 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), 2184 I40E_GLV_GOTCL(stat_idx), 2185 vsi->stat_offsets_loaded, 2186 &oes->tx_bytes, &es->tx_bytes); 2187 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), 2188 I40E_GLV_UPTCL(stat_idx), 2189 vsi->stat_offsets_loaded, 2190 &oes->tx_unicast, &es->tx_unicast); 2191 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), 2192 I40E_GLV_MPTCL(stat_idx), 2193 vsi->stat_offsets_loaded, 2194 &oes->tx_multicast, &es->tx_multicast); 2195 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), 2196 I40E_GLV_BPTCL(stat_idx), 2197 vsi->stat_offsets_loaded, 2198 &oes->tx_broadcast, &es->tx_broadcast); 2199 vsi->stat_offsets_loaded = true; 2200 } 2201 2202 void 2203 ixl_update_vsi_stats(struct ixl_vsi *vsi) 2204 { 2205 struct ixl_pf *pf; 2206 struct i40e_eth_stats *es; 2207 u64 tx_discards, csum_errs; 2208 2209 struct i40e_hw_port_stats *nsd; 2210 2211 pf = vsi->back; 2212 es = &vsi->eth_stats; 2213 nsd = &pf->stats; 2214 2215 ixl_update_eth_stats(vsi); 2216 2217 tx_discards = es->tx_discards + nsd->tx_dropped_link_down; 2218 2219 csum_errs = 0; 2220 for (int i = 0; i < vsi->num_rx_queues; i++) 2221 csum_errs += vsi->rx_queues[i].rxr.csum_errs; 2222 nsd->checksum_error = csum_errs; 2223 2224 /* Update ifnet stats */ 2225 IXL_SET_IPACKETS(vsi, es->rx_unicast + 2226 es->rx_multicast + 2227 es->rx_broadcast); 2228 IXL_SET_OPACKETS(vsi, es->tx_unicast + 2229 es->tx_multicast + 2230 es->tx_broadcast); 2231 IXL_SET_IBYTES(vsi, es->rx_bytes); 2232 IXL_SET_OBYTES(vsi, es->tx_bytes); 2233 IXL_SET_IMCASTS(vsi, es->rx_multicast); 2234 IXL_SET_OMCASTS(vsi, es->tx_multicast); 2235 2236 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + 2237 nsd->checksum_error + nsd->rx_length_errors + 2238 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize + 2239 nsd->rx_jabber); 2240 IXL_SET_OERRORS(vsi, es->tx_errors); 2241 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); 2242 IXL_SET_OQDROPS(vsi, tx_discards); 2243 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); 2244 IXL_SET_COLLISIONS(vsi, 0); 2245 } 2246 2247 /** 2248 * Reset all of the stats for the given pf 2249 **/ 2250 void 2251 ixl_pf_reset_stats(struct ixl_pf *pf) 2252 { 2253 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); 2254 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); 2255 pf->stat_offsets_loaded = false; 2256 } 2257 2258 /** 2259 * Resets all stats of the given vsi 2260 **/ 2261 void 2262 ixl_vsi_reset_stats(struct ixl_vsi *vsi) 2263 { 2264 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); 2265 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); 2266 vsi->stat_offsets_loaded = false; 2267 } 2268 2269 /** 2270 * Read and update a 48 bit stat from the hw 2271 * 2272 * Since the device stats are not reset at PFReset, they likely will not 2273 * be zeroed when the driver starts. We'll save the first values read 2274 * and use them as offsets to be subtracted from the raw values in order 2275 * to report stats that count from zero. 2276 **/ 2277 void 2278 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, 2279 bool offset_loaded, u64 *offset, u64 *stat) 2280 { 2281 u64 new_data; 2282 2283 new_data = rd64(hw, loreg); 2284 2285 if (!offset_loaded) 2286 *offset = new_data; 2287 if (new_data >= *offset) 2288 *stat = new_data - *offset; 2289 else 2290 *stat = (new_data + ((u64)1 << 48)) - *offset; 2291 *stat &= 0xFFFFFFFFFFFFULL; 2292 } 2293 2294 /** 2295 * Read and update a 32 bit stat from the hw 2296 **/ 2297 void 2298 ixl_stat_update32(struct i40e_hw *hw, u32 reg, 2299 bool offset_loaded, u64 *offset, u64 *stat) 2300 { 2301 u32 new_data; 2302 2303 new_data = rd32(hw, reg); 2304 if (!offset_loaded) 2305 *offset = new_data; 2306 if (new_data >= *offset) 2307 *stat = (u32)(new_data - *offset); 2308 else 2309 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); 2310 } 2311 2312 /** 2313 * Add subset of device sysctls safe to use in recovery mode 2314 */ 2315 void 2316 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf) 2317 { 2318 device_t dev = pf->dev; 2319 2320 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2321 struct sysctl_oid_list *ctx_list = 2322 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2323 2324 struct sysctl_oid *debug_node; 2325 struct sysctl_oid_list *debug_list; 2326 2327 SYSCTL_ADD_PROC(ctx, ctx_list, 2328 OID_AUTO, "fw_version", 2329 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2330 ixl_sysctl_show_fw, "A", "Firmware version"); 2331 2332 /* Add sysctls meant to print debug information, but don't list them 2333 * in "sysctl -a" output. */ 2334 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2335 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2336 "Debug Sysctls"); 2337 debug_list = SYSCTL_CHILDREN(debug_node); 2338 2339 SYSCTL_ADD_UINT(ctx, debug_list, 2340 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2341 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2342 2343 SYSCTL_ADD_UINT(ctx, debug_list, 2344 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2345 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2346 2347 SYSCTL_ADD_PROC(ctx, debug_list, 2348 OID_AUTO, "dump_debug_data", 2349 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2350 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2351 2352 SYSCTL_ADD_PROC(ctx, debug_list, 2353 OID_AUTO, "do_pf_reset", 2354 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2355 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2356 2357 SYSCTL_ADD_PROC(ctx, debug_list, 2358 OID_AUTO, "do_core_reset", 2359 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2360 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2361 2362 SYSCTL_ADD_PROC(ctx, debug_list, 2363 OID_AUTO, "do_global_reset", 2364 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2365 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2366 2367 SYSCTL_ADD_PROC(ctx, debug_list, 2368 OID_AUTO, "queue_interrupt_table", 2369 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2370 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2371 } 2372 2373 void 2374 ixl_add_device_sysctls(struct ixl_pf *pf) 2375 { 2376 device_t dev = pf->dev; 2377 struct i40e_hw *hw = &pf->hw; 2378 2379 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2380 struct sysctl_oid_list *ctx_list = 2381 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2382 2383 struct sysctl_oid *debug_node; 2384 struct sysctl_oid_list *debug_list; 2385 2386 struct sysctl_oid *fec_node; 2387 struct sysctl_oid_list *fec_list; 2388 struct sysctl_oid *eee_node; 2389 struct sysctl_oid_list *eee_list; 2390 2391 /* Set up sysctls */ 2392 SYSCTL_ADD_PROC(ctx, ctx_list, 2393 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2394 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 2395 2396 SYSCTL_ADD_PROC(ctx, ctx_list, 2397 OID_AUTO, "advertise_speed", 2398 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2399 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); 2400 2401 SYSCTL_ADD_PROC(ctx, ctx_list, 2402 OID_AUTO, "supported_speeds", 2403 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2404 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); 2405 2406 SYSCTL_ADD_PROC(ctx, ctx_list, 2407 OID_AUTO, "current_speed", 2408 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2409 ixl_sysctl_current_speed, "A", "Current Port Speed"); 2410 2411 SYSCTL_ADD_PROC(ctx, ctx_list, 2412 OID_AUTO, "fw_version", 2413 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2414 ixl_sysctl_show_fw, "A", "Firmware version"); 2415 2416 SYSCTL_ADD_PROC(ctx, ctx_list, 2417 OID_AUTO, "unallocated_queues", 2418 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2419 ixl_sysctl_unallocated_queues, "I", 2420 "Queues not allocated to a PF or VF"); 2421 2422 SYSCTL_ADD_PROC(ctx, ctx_list, 2423 OID_AUTO, "tx_itr", 2424 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2425 ixl_sysctl_pf_tx_itr, "I", 2426 "Immediately set TX ITR value for all queues"); 2427 2428 SYSCTL_ADD_PROC(ctx, ctx_list, 2429 OID_AUTO, "rx_itr", 2430 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2431 ixl_sysctl_pf_rx_itr, "I", 2432 "Immediately set RX ITR value for all queues"); 2433 2434 SYSCTL_ADD_INT(ctx, ctx_list, 2435 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 2436 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 2437 2438 SYSCTL_ADD_INT(ctx, ctx_list, 2439 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 2440 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 2441 2442 /* Add FEC sysctls for 25G adapters */ 2443 if (i40e_is_25G_device(hw->device_id)) { 2444 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2445 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2446 "FEC Sysctls"); 2447 fec_list = SYSCTL_CHILDREN(fec_node); 2448 2449 SYSCTL_ADD_PROC(ctx, fec_list, 2450 OID_AUTO, "fc_ability", 2451 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2452 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 2453 2454 SYSCTL_ADD_PROC(ctx, fec_list, 2455 OID_AUTO, "rs_ability", 2456 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2457 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 2458 2459 SYSCTL_ADD_PROC(ctx, fec_list, 2460 OID_AUTO, "fc_requested", 2461 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2462 ixl_sysctl_fec_fc_request, "I", 2463 "FC FEC mode requested on link"); 2464 2465 SYSCTL_ADD_PROC(ctx, fec_list, 2466 OID_AUTO, "rs_requested", 2467 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2468 ixl_sysctl_fec_rs_request, "I", 2469 "RS FEC mode requested on link"); 2470 2471 SYSCTL_ADD_PROC(ctx, fec_list, 2472 OID_AUTO, "auto_fec_enabled", 2473 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2474 ixl_sysctl_fec_auto_enable, "I", 2475 "Let FW decide FEC ability/request modes"); 2476 } 2477 2478 SYSCTL_ADD_PROC(ctx, ctx_list, 2479 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2480 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 2481 2482 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2483 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2484 "Energy Efficient Ethernet (EEE) Sysctls"); 2485 eee_list = SYSCTL_CHILDREN(eee_node); 2486 2487 SYSCTL_ADD_PROC(ctx, eee_list, 2488 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2489 pf, 0, ixl_sysctl_eee_enable, "I", 2490 "Enable Energy Efficient Ethernet (EEE)"); 2491 2492 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", 2493 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0, 2494 "TX LPI status"); 2495 2496 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", 2497 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0, 2498 "RX LPI status"); 2499 2500 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", 2501 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count, 2502 "TX LPI count"); 2503 2504 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", 2505 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count, 2506 "RX LPI count"); 2507 2508 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, 2509 "link_active_on_if_down", 2510 CTLTYPE_INT | CTLFLAG_RWTUN, 2511 pf, 0, ixl_sysctl_set_link_active, "I", 2512 IXL_SYSCTL_HELP_SET_LINK_ACTIVE); 2513 2514 /* Add sysctls meant to print debug information, but don't list them 2515 * in "sysctl -a" output. */ 2516 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2517 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2518 "Debug Sysctls"); 2519 debug_list = SYSCTL_CHILDREN(debug_node); 2520 2521 SYSCTL_ADD_UINT(ctx, debug_list, 2522 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2523 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2524 2525 SYSCTL_ADD_UINT(ctx, debug_list, 2526 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2527 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2528 2529 SYSCTL_ADD_PROC(ctx, debug_list, 2530 OID_AUTO, "link_status", 2531 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2532 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); 2533 2534 SYSCTL_ADD_PROC(ctx, debug_list, 2535 OID_AUTO, "phy_abilities_init", 2536 CTLTYPE_STRING | CTLFLAG_RD, 2537 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities"); 2538 2539 SYSCTL_ADD_PROC(ctx, debug_list, 2540 OID_AUTO, "phy_abilities", 2541 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2542 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); 2543 2544 SYSCTL_ADD_PROC(ctx, debug_list, 2545 OID_AUTO, "filter_list", 2546 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2547 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); 2548 2549 SYSCTL_ADD_PROC(ctx, debug_list, 2550 OID_AUTO, "hw_res_alloc", 2551 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2552 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); 2553 2554 SYSCTL_ADD_PROC(ctx, debug_list, 2555 OID_AUTO, "switch_config", 2556 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2557 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); 2558 2559 SYSCTL_ADD_PROC(ctx, debug_list, 2560 OID_AUTO, "switch_vlans", 2561 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2562 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration"); 2563 2564 SYSCTL_ADD_PROC(ctx, debug_list, 2565 OID_AUTO, "rss_key", 2566 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2567 pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); 2568 2569 SYSCTL_ADD_PROC(ctx, debug_list, 2570 OID_AUTO, "rss_lut", 2571 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2572 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); 2573 2574 SYSCTL_ADD_PROC(ctx, debug_list, 2575 OID_AUTO, "rss_hena", 2576 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2577 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); 2578 2579 SYSCTL_ADD_PROC(ctx, debug_list, 2580 OID_AUTO, "disable_fw_link_management", 2581 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2582 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); 2583 2584 SYSCTL_ADD_PROC(ctx, debug_list, 2585 OID_AUTO, "dump_debug_data", 2586 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2587 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2588 2589 SYSCTL_ADD_PROC(ctx, debug_list, 2590 OID_AUTO, "do_pf_reset", 2591 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2592 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2593 2594 SYSCTL_ADD_PROC(ctx, debug_list, 2595 OID_AUTO, "do_core_reset", 2596 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2597 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2598 2599 SYSCTL_ADD_PROC(ctx, debug_list, 2600 OID_AUTO, "do_global_reset", 2601 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2602 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2603 2604 SYSCTL_ADD_PROC(ctx, debug_list, 2605 OID_AUTO, "queue_interrupt_table", 2606 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2607 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2608 2609 if (pf->has_i2c) { 2610 SYSCTL_ADD_PROC(ctx, debug_list, 2611 OID_AUTO, "read_i2c_byte", 2612 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2613 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); 2614 2615 SYSCTL_ADD_PROC(ctx, debug_list, 2616 OID_AUTO, "write_i2c_byte", 2617 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2618 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); 2619 2620 SYSCTL_ADD_PROC(ctx, debug_list, 2621 OID_AUTO, "read_i2c_diag_data", 2622 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2623 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); 2624 } 2625 } 2626 2627 /* 2628 * Primarily for finding out how many queues can be assigned to VFs, 2629 * at runtime. 2630 */ 2631 static int 2632 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) 2633 { 2634 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2635 int queues; 2636 2637 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); 2638 2639 return sysctl_handle_int(oidp, NULL, queues, req); 2640 } 2641 2642 static const char * 2643 ixl_link_speed_string(enum i40e_aq_link_speed link_speed) 2644 { 2645 const char * link_speed_str[] = { 2646 "Unknown", 2647 "100 Mbps", 2648 "1 Gbps", 2649 "10 Gbps", 2650 "40 Gbps", 2651 "20 Gbps", 2652 "25 Gbps", 2653 "2.5 Gbps", 2654 "5 Gbps" 2655 }; 2656 int index; 2657 2658 switch (link_speed) { 2659 case I40E_LINK_SPEED_100MB: 2660 index = 1; 2661 break; 2662 case I40E_LINK_SPEED_1GB: 2663 index = 2; 2664 break; 2665 case I40E_LINK_SPEED_10GB: 2666 index = 3; 2667 break; 2668 case I40E_LINK_SPEED_40GB: 2669 index = 4; 2670 break; 2671 case I40E_LINK_SPEED_20GB: 2672 index = 5; 2673 break; 2674 case I40E_LINK_SPEED_25GB: 2675 index = 6; 2676 break; 2677 case I40E_LINK_SPEED_2_5GB: 2678 index = 7; 2679 break; 2680 case I40E_LINK_SPEED_5GB: 2681 index = 8; 2682 break; 2683 case I40E_LINK_SPEED_UNKNOWN: 2684 default: 2685 index = 0; 2686 break; 2687 } 2688 2689 return (link_speed_str[index]); 2690 } 2691 2692 int 2693 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2694 { 2695 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2696 struct i40e_hw *hw = &pf->hw; 2697 int error = 0; 2698 2699 ixl_update_link_status(pf); 2700 2701 error = sysctl_handle_string(oidp, 2702 __DECONST(void *, 2703 ixl_link_speed_string(hw->phy.link_info.link_speed)), 2704 8, req); 2705 2706 return (error); 2707 } 2708 2709 /* 2710 * Converts 8-bit speeds value to and from sysctl flags and 2711 * Admin Queue flags. 2712 */ 2713 static u8 2714 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) 2715 { 2716 #define SPEED_MAP_SIZE 8 2717 static u16 speedmap[SPEED_MAP_SIZE] = { 2718 (I40E_LINK_SPEED_100MB | (0x1 << 8)), 2719 (I40E_LINK_SPEED_1GB | (0x2 << 8)), 2720 (I40E_LINK_SPEED_10GB | (0x4 << 8)), 2721 (I40E_LINK_SPEED_20GB | (0x8 << 8)), 2722 (I40E_LINK_SPEED_25GB | (0x10 << 8)), 2723 (I40E_LINK_SPEED_40GB | (0x20 << 8)), 2724 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)), 2725 (I40E_LINK_SPEED_5GB | (0x80 << 8)), 2726 }; 2727 u8 retval = 0; 2728 2729 for (int i = 0; i < SPEED_MAP_SIZE; i++) { 2730 if (to_aq) 2731 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; 2732 else 2733 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0; 2734 } 2735 2736 return (retval); 2737 } 2738 2739 int 2740 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) 2741 { 2742 struct i40e_hw *hw = &pf->hw; 2743 device_t dev = pf->dev; 2744 struct i40e_aq_get_phy_abilities_resp abilities; 2745 struct i40e_aq_set_phy_config config; 2746 enum i40e_status_code aq_error = 0; 2747 2748 /* Get current capability information */ 2749 aq_error = i40e_aq_get_phy_capabilities(hw, 2750 FALSE, FALSE, &abilities, NULL); 2751 if (aq_error) { 2752 device_printf(dev, 2753 "%s: Error getting phy capabilities %d," 2754 " aq error: %d\n", __func__, aq_error, 2755 hw->aq.asq_last_status); 2756 return (EIO); 2757 } 2758 2759 /* Prepare new config */ 2760 bzero(&config, sizeof(config)); 2761 if (from_aq) 2762 config.link_speed = speeds; 2763 else 2764 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); 2765 config.phy_type = abilities.phy_type; 2766 config.phy_type_ext = abilities.phy_type_ext; 2767 config.abilities = abilities.abilities 2768 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 2769 config.eee_capability = abilities.eee_capability; 2770 config.eeer = abilities.eeer_val; 2771 config.low_power_ctrl = abilities.d3_lpan; 2772 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 2773 & I40E_AQ_PHY_FEC_CONFIG_MASK; 2774 2775 /* Do aq command & restart link */ 2776 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 2777 if (aq_error) { 2778 device_printf(dev, 2779 "%s: Error setting new phy config %d," 2780 " aq error: %d\n", __func__, aq_error, 2781 hw->aq.asq_last_status); 2782 return (EIO); 2783 } 2784 2785 return (0); 2786 } 2787 2788 /* 2789 ** Supported link speeds 2790 ** Flags: 2791 ** 0x1 - 100 Mb 2792 ** 0x2 - 1G 2793 ** 0x4 - 10G 2794 ** 0x8 - 20G 2795 ** 0x10 - 25G 2796 ** 0x20 - 40G 2797 ** 0x40 - 2.5G 2798 ** 0x80 - 5G 2799 */ 2800 static int 2801 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) 2802 { 2803 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2804 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 2805 2806 return sysctl_handle_int(oidp, NULL, supported, req); 2807 } 2808 2809 /* 2810 ** Control link advertise speed: 2811 ** Flags: 2812 ** 0x1 - advertise 100 Mb 2813 ** 0x2 - advertise 1G 2814 ** 0x4 - advertise 10G 2815 ** 0x8 - advertise 20G 2816 ** 0x10 - advertise 25G 2817 ** 0x20 - advertise 40G 2818 ** 0x40 - advertise 2.5G 2819 ** 0x80 - advertise 5G 2820 ** 2821 ** Set to 0 to disable link 2822 */ 2823 int 2824 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) 2825 { 2826 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2827 device_t dev = pf->dev; 2828 u8 converted_speeds; 2829 int requested_ls = 0; 2830 int error = 0; 2831 2832 /* Read in new mode */ 2833 requested_ls = pf->advertised_speed; 2834 error = sysctl_handle_int(oidp, &requested_ls, 0, req); 2835 if ((error) || (req->newptr == NULL)) 2836 return (error); 2837 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 2838 device_printf(dev, "Interface is currently in FW recovery mode. " 2839 "Setting advertise speed not supported\n"); 2840 return (EINVAL); 2841 } 2842 2843 /* Error out if bits outside of possible flag range are set */ 2844 if ((requested_ls & ~((u8)0xFF)) != 0) { 2845 device_printf(dev, "Input advertised speed out of range; " 2846 "valid flags are: 0x%02x\n", 2847 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2848 return (EINVAL); 2849 } 2850 2851 /* Check if adapter supports input value */ 2852 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); 2853 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { 2854 device_printf(dev, "Invalid advertised speed; " 2855 "valid flags are: 0x%02x\n", 2856 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2857 return (EINVAL); 2858 } 2859 2860 error = ixl_set_advertised_speeds(pf, requested_ls, false); 2861 if (error) 2862 return (error); 2863 2864 pf->advertised_speed = requested_ls; 2865 ixl_update_link_status(pf); 2866 return (0); 2867 } 2868 2869 /* 2870 * Input: bitmap of enum i40e_aq_link_speed 2871 */ 2872 u64 2873 ixl_max_aq_speed_to_value(u8 link_speeds) 2874 { 2875 if (link_speeds & I40E_LINK_SPEED_40GB) 2876 return IF_Gbps(40); 2877 if (link_speeds & I40E_LINK_SPEED_25GB) 2878 return IF_Gbps(25); 2879 if (link_speeds & I40E_LINK_SPEED_20GB) 2880 return IF_Gbps(20); 2881 if (link_speeds & I40E_LINK_SPEED_10GB) 2882 return IF_Gbps(10); 2883 if (link_speeds & I40E_LINK_SPEED_5GB) 2884 return IF_Gbps(5); 2885 if (link_speeds & I40E_LINK_SPEED_2_5GB) 2886 return IF_Mbps(2500); 2887 if (link_speeds & I40E_LINK_SPEED_1GB) 2888 return IF_Gbps(1); 2889 if (link_speeds & I40E_LINK_SPEED_100MB) 2890 return IF_Mbps(100); 2891 else 2892 /* Minimum supported link speed */ 2893 return IF_Mbps(100); 2894 } 2895 2896 /* 2897 ** Get the width and transaction speed of 2898 ** the bus this adapter is plugged into. 2899 */ 2900 void 2901 ixl_get_bus_info(struct ixl_pf *pf) 2902 { 2903 struct i40e_hw *hw = &pf->hw; 2904 device_t dev = pf->dev; 2905 u16 link; 2906 u32 offset, num_ports; 2907 u64 max_speed; 2908 2909 /* Some devices don't use PCIE */ 2910 if (hw->mac.type == I40E_MAC_X722) 2911 return; 2912 2913 /* Read PCI Express Capabilities Link Status Register */ 2914 pci_find_cap(dev, PCIY_EXPRESS, &offset); 2915 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2916 2917 /* Fill out hw struct with PCIE info */ 2918 i40e_set_pci_config_data(hw, link); 2919 2920 /* Use info to print out bandwidth messages */ 2921 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 2922 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 2923 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 2924 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 2925 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 2926 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 2927 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 2928 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 2929 ("Unknown")); 2930 2931 /* 2932 * If adapter is in slot with maximum supported speed, 2933 * no warning message needs to be printed out. 2934 */ 2935 if (hw->bus.speed >= i40e_bus_speed_8000 2936 && hw->bus.width >= i40e_bus_width_pcie_x8) 2937 return; 2938 2939 num_ports = bitcount32(hw->func_caps.valid_functions); 2940 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; 2941 2942 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { 2943 device_printf(dev, "PCI-Express bandwidth available" 2944 " for this device may be insufficient for" 2945 " optimal performance.\n"); 2946 device_printf(dev, "Please move the device to a different" 2947 " PCI-e link with more lanes and/or higher" 2948 " transfer rate.\n"); 2949 } 2950 } 2951 2952 static int 2953 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2954 { 2955 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2956 struct i40e_hw *hw = &pf->hw; 2957 struct sbuf *sbuf; 2958 2959 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2960 ixl_nvm_version_str(hw, sbuf); 2961 sbuf_finish(sbuf); 2962 sbuf_delete(sbuf); 2963 2964 return (0); 2965 } 2966 2967 void 2968 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) 2969 { 2970 u8 nvma_ptr = nvma->config & 0xFF; 2971 u8 nvma_flags = (nvma->config & 0xF00) >> 8; 2972 const char * cmd_str; 2973 2974 switch (nvma->command) { 2975 case I40E_NVM_READ: 2976 if (nvma_ptr == 0xF && nvma_flags == 0xF && 2977 nvma->offset == 0 && nvma->data_size == 1) { 2978 device_printf(dev, "NVMUPD: Get Driver Status Command\n"); 2979 return; 2980 } 2981 cmd_str = "READ "; 2982 break; 2983 case I40E_NVM_WRITE: 2984 cmd_str = "WRITE"; 2985 break; 2986 default: 2987 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command); 2988 return; 2989 } 2990 device_printf(dev, 2991 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n", 2992 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size); 2993 } 2994 2995 int 2996 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) 2997 { 2998 struct i40e_hw *hw = &pf->hw; 2999 struct i40e_nvm_access *nvma; 3000 device_t dev = pf->dev; 3001 enum i40e_status_code status = 0; 3002 size_t nvma_size, ifd_len, exp_len; 3003 int err, perrno; 3004 3005 DEBUGFUNC("ixl_handle_nvmupd_cmd"); 3006 3007 /* Sanity checks */ 3008 nvma_size = sizeof(struct i40e_nvm_access); 3009 ifd_len = ifd->ifd_len; 3010 3011 if (ifd_len < nvma_size || 3012 ifd->ifd_data == NULL) { 3013 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", 3014 __func__); 3015 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", 3016 __func__, ifd_len, nvma_size); 3017 device_printf(dev, "%s: data pointer: %p\n", __func__, 3018 ifd->ifd_data); 3019 return (EINVAL); 3020 } 3021 3022 nvma = malloc(ifd_len, M_IXL, M_WAITOK); 3023 err = copyin(ifd->ifd_data, nvma, ifd_len); 3024 if (err) { 3025 device_printf(dev, "%s: Cannot get request from user space\n", 3026 __func__); 3027 free(nvma, M_IXL); 3028 return (err); 3029 } 3030 3031 if (pf->dbg_mask & IXL_DBG_NVMUPD) 3032 ixl_print_nvm_cmd(dev, nvma); 3033 3034 if (IXL_PF_IS_RESETTING(pf)) { 3035 int count = 0; 3036 while (count++ < 100) { 3037 i40e_msec_delay(100); 3038 if (!(IXL_PF_IS_RESETTING(pf))) 3039 break; 3040 } 3041 } 3042 3043 if (IXL_PF_IS_RESETTING(pf)) { 3044 device_printf(dev, 3045 "%s: timeout waiting for EMP reset to finish\n", 3046 __func__); 3047 free(nvma, M_IXL); 3048 return (-EBUSY); 3049 } 3050 3051 if (nvma->data_size < 1 || nvma->data_size > 4096) { 3052 device_printf(dev, 3053 "%s: invalid request, data size not in supported range\n", 3054 __func__); 3055 free(nvma, M_IXL); 3056 return (EINVAL); 3057 } 3058 3059 /* 3060 * Older versions of the NVM update tool don't set ifd_len to the size 3061 * of the entire buffer passed to the ioctl. Check the data_size field 3062 * in the contained i40e_nvm_access struct and ensure everything is 3063 * copied in from userspace. 3064 */ 3065 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ 3066 3067 if (ifd_len < exp_len) { 3068 ifd_len = exp_len; 3069 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK); 3070 err = copyin(ifd->ifd_data, nvma, ifd_len); 3071 if (err) { 3072 device_printf(dev, "%s: Cannot get request from user space\n", 3073 __func__); 3074 free(nvma, M_IXL); 3075 return (err); 3076 } 3077 } 3078 3079 // TODO: Might need a different lock here 3080 // IXL_PF_LOCK(pf); 3081 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); 3082 // IXL_PF_UNLOCK(pf); 3083 3084 err = copyout(nvma, ifd->ifd_data, ifd_len); 3085 free(nvma, M_IXL); 3086 if (err) { 3087 device_printf(dev, "%s: Cannot return data to user space\n", 3088 __func__); 3089 return (err); 3090 } 3091 3092 /* Let the nvmupdate report errors, show them only when debug is enabled */ 3093 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) 3094 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", 3095 i40e_stat_str(hw, status), perrno); 3096 3097 /* 3098 * -EPERM is actually ERESTART, which the kernel interprets as it needing 3099 * to run this ioctl again. So use -EACCES for -EPERM instead. 3100 */ 3101 if (perrno == -EPERM) 3102 return (-EACCES); 3103 else 3104 return (perrno); 3105 } 3106 3107 int 3108 ixl_find_i2c_interface(struct ixl_pf *pf) 3109 { 3110 struct i40e_hw *hw = &pf->hw; 3111 bool i2c_en, port_matched; 3112 u32 reg; 3113 3114 for (int i = 0; i < 4; i++) { 3115 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); 3116 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); 3117 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) 3118 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) 3119 & BIT(hw->port); 3120 if (i2c_en && port_matched) 3121 return (i); 3122 } 3123 3124 return (-1); 3125 } 3126 3127 void 3128 ixl_set_link(struct ixl_pf *pf, bool enable) 3129 { 3130 struct i40e_hw *hw = &pf->hw; 3131 device_t dev = pf->dev; 3132 struct i40e_aq_get_phy_abilities_resp abilities; 3133 struct i40e_aq_set_phy_config config; 3134 enum i40e_status_code aq_error = 0; 3135 u32 phy_type, phy_type_ext; 3136 3137 /* Get initial capability information */ 3138 aq_error = i40e_aq_get_phy_capabilities(hw, 3139 FALSE, TRUE, &abilities, NULL); 3140 if (aq_error) { 3141 device_printf(dev, 3142 "%s: Error getting phy capabilities %d," 3143 " aq error: %d\n", __func__, aq_error, 3144 hw->aq.asq_last_status); 3145 return; 3146 } 3147 3148 phy_type = abilities.phy_type; 3149 phy_type_ext = abilities.phy_type_ext; 3150 3151 /* Get current capability information */ 3152 aq_error = i40e_aq_get_phy_capabilities(hw, 3153 FALSE, FALSE, &abilities, NULL); 3154 if (aq_error) { 3155 device_printf(dev, 3156 "%s: Error getting phy capabilities %d," 3157 " aq error: %d\n", __func__, aq_error, 3158 hw->aq.asq_last_status); 3159 return; 3160 } 3161 3162 /* Prepare new config */ 3163 memset(&config, 0, sizeof(config)); 3164 config.link_speed = abilities.link_speed; 3165 config.abilities = abilities.abilities; 3166 config.eee_capability = abilities.eee_capability; 3167 config.eeer = abilities.eeer_val; 3168 config.low_power_ctrl = abilities.d3_lpan; 3169 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 3170 & I40E_AQ_PHY_FEC_CONFIG_MASK; 3171 config.phy_type = 0; 3172 config.phy_type_ext = 0; 3173 3174 config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX | 3175 I40E_AQ_PHY_FLAG_PAUSE_RX); 3176 3177 switch (pf->fc) { 3178 case I40E_FC_FULL: 3179 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX | 3180 I40E_AQ_PHY_FLAG_PAUSE_RX; 3181 break; 3182 case I40E_FC_RX_PAUSE: 3183 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX; 3184 break; 3185 case I40E_FC_TX_PAUSE: 3186 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX; 3187 break; 3188 default: 3189 break; 3190 } 3191 3192 if (enable) { 3193 config.phy_type = phy_type; 3194 config.phy_type_ext = phy_type_ext; 3195 3196 } 3197 3198 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 3199 if (aq_error) { 3200 device_printf(dev, 3201 "%s: Error setting new phy config %d," 3202 " aq error: %d\n", __func__, aq_error, 3203 hw->aq.asq_last_status); 3204 return; 3205 } 3206 3207 aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL); 3208 if (aq_error) { 3209 device_printf(dev, 3210 "%s: Error set link config %d," 3211 " aq error: %d\n", __func__, aq_error, 3212 hw->aq.asq_last_status); 3213 return; 3214 } 3215 } 3216 3217 static char * 3218 ixl_phy_type_string(u32 bit_pos, bool ext) 3219 { 3220 static char * phy_types_str[32] = { 3221 "SGMII", 3222 "1000BASE-KX", 3223 "10GBASE-KX4", 3224 "10GBASE-KR", 3225 "40GBASE-KR4", 3226 "XAUI", 3227 "XFI", 3228 "SFI", 3229 "XLAUI", 3230 "XLPPI", 3231 "40GBASE-CR4", 3232 "10GBASE-CR1", 3233 "SFP+ Active DA", 3234 "QSFP+ Active DA", 3235 "Reserved (14)", 3236 "Reserved (15)", 3237 "Reserved (16)", 3238 "100BASE-TX", 3239 "1000BASE-T", 3240 "10GBASE-T", 3241 "10GBASE-SR", 3242 "10GBASE-LR", 3243 "10GBASE-SFP+Cu", 3244 "10GBASE-CR1", 3245 "40GBASE-CR4", 3246 "40GBASE-SR4", 3247 "40GBASE-LR4", 3248 "1000BASE-SX", 3249 "1000BASE-LX", 3250 "1000BASE-T Optical", 3251 "20GBASE-KR2", 3252 "Reserved (31)" 3253 }; 3254 static char * ext_phy_types_str[8] = { 3255 "25GBASE-KR", 3256 "25GBASE-CR", 3257 "25GBASE-SR", 3258 "25GBASE-LR", 3259 "25GBASE-AOC", 3260 "25GBASE-ACC", 3261 "2.5GBASE-T", 3262 "5GBASE-T" 3263 }; 3264 3265 if (ext && bit_pos > 7) return "Invalid_Ext"; 3266 if (bit_pos > 31) return "Invalid"; 3267 3268 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; 3269 } 3270 3271 /* TODO: ERJ: I don't this is necessary anymore. */ 3272 int 3273 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) 3274 { 3275 device_t dev = pf->dev; 3276 struct i40e_hw *hw = &pf->hw; 3277 struct i40e_aq_desc desc; 3278 enum i40e_status_code status; 3279 3280 struct i40e_aqc_get_link_status *aq_link_status = 3281 (struct i40e_aqc_get_link_status *)&desc.params.raw; 3282 3283 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 3284 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); 3285 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 3286 if (status) { 3287 device_printf(dev, 3288 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", 3289 __func__, i40e_stat_str(hw, status), 3290 i40e_aq_str(hw, hw->aq.asq_last_status)); 3291 return (EIO); 3292 } 3293 3294 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); 3295 return (0); 3296 } 3297 3298 static char * 3299 ixl_phy_type_string_ls(u8 val) 3300 { 3301 if (val >= 0x1F) 3302 return ixl_phy_type_string(val - 0x1F, true); 3303 else 3304 return ixl_phy_type_string(val, false); 3305 } 3306 3307 static int 3308 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) 3309 { 3310 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3311 device_t dev = pf->dev; 3312 struct sbuf *buf; 3313 int error = 0; 3314 3315 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3316 if (!buf) { 3317 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3318 return (ENOMEM); 3319 } 3320 3321 struct i40e_aqc_get_link_status link_status; 3322 error = ixl_aq_get_link_status(pf, &link_status); 3323 if (error) { 3324 sbuf_delete(buf); 3325 return (error); 3326 } 3327 3328 sbuf_printf(buf, "\n" 3329 "PHY Type : 0x%02x<%s>\n" 3330 "Speed : 0x%02x\n" 3331 "Link info: 0x%02x\n" 3332 "AN info : 0x%02x\n" 3333 "Ext info : 0x%02x\n" 3334 "Loopback : 0x%02x\n" 3335 "Max Frame: %d\n" 3336 "Config : 0x%02x\n" 3337 "Power : 0x%02x", 3338 link_status.phy_type, 3339 ixl_phy_type_string_ls(link_status.phy_type), 3340 link_status.link_speed, 3341 link_status.link_info, 3342 link_status.an_info, 3343 link_status.ext_info, 3344 link_status.loopback, 3345 link_status.max_frame_size, 3346 link_status.config, 3347 link_status.power_desc); 3348 3349 error = sbuf_finish(buf); 3350 if (error) 3351 device_printf(dev, "Error finishing sbuf: %d\n", error); 3352 3353 sbuf_delete(buf); 3354 return (error); 3355 } 3356 3357 static int 3358 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 3359 { 3360 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3361 struct i40e_hw *hw = &pf->hw; 3362 device_t dev = pf->dev; 3363 enum i40e_status_code status; 3364 struct i40e_aq_get_phy_abilities_resp abilities; 3365 struct sbuf *buf; 3366 int error = 0; 3367 3368 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3369 if (!buf) { 3370 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3371 return (ENOMEM); 3372 } 3373 3374 status = i40e_aq_get_phy_capabilities(hw, 3375 FALSE, arg2 != 0, &abilities, NULL); 3376 if (status) { 3377 device_printf(dev, 3378 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 3379 __func__, i40e_stat_str(hw, status), 3380 i40e_aq_str(hw, hw->aq.asq_last_status)); 3381 sbuf_delete(buf); 3382 return (EIO); 3383 } 3384 3385 sbuf_printf(buf, "\n" 3386 "PHY Type : %08x", 3387 abilities.phy_type); 3388 3389 if (abilities.phy_type != 0) { 3390 sbuf_printf(buf, "<"); 3391 for (int i = 0; i < 32; i++) 3392 if ((1 << i) & abilities.phy_type) 3393 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); 3394 sbuf_printf(buf, ">"); 3395 } 3396 3397 sbuf_printf(buf, "\nPHY Ext : %02x", 3398 abilities.phy_type_ext); 3399 3400 if (abilities.phy_type_ext != 0) { 3401 sbuf_printf(buf, "<"); 3402 for (int i = 0; i < 4; i++) 3403 if ((1 << i) & abilities.phy_type_ext) 3404 sbuf_printf(buf, "%s,", 3405 ixl_phy_type_string(i, true)); 3406 sbuf_printf(buf, ">"); 3407 } 3408 3409 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 3410 if (abilities.link_speed != 0) { 3411 u8 link_speed; 3412 sbuf_printf(buf, " <"); 3413 for (int i = 0; i < 8; i++) { 3414 link_speed = (1 << i) & abilities.link_speed; 3415 if (link_speed) 3416 sbuf_printf(buf, "%s, ", 3417 ixl_link_speed_string(link_speed)); 3418 } 3419 sbuf_printf(buf, ">"); 3420 } 3421 3422 sbuf_printf(buf, "\n" 3423 "Abilities: %02x\n" 3424 "EEE cap : %04x\n" 3425 "EEER reg : %08x\n" 3426 "D3 Lpan : %02x\n" 3427 "ID : %02x %02x %02x %02x\n" 3428 "ModType : %02x %02x %02x\n" 3429 "ModType E: %01x\n" 3430 "FEC Cfg : %02x\n" 3431 "Ext CC : %02x", 3432 abilities.abilities, abilities.eee_capability, 3433 abilities.eeer_val, abilities.d3_lpan, 3434 abilities.phy_id[0], abilities.phy_id[1], 3435 abilities.phy_id[2], abilities.phy_id[3], 3436 abilities.module_type[0], abilities.module_type[1], 3437 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 3438 abilities.fec_cfg_curr_mod_ext_info & 0x1F, 3439 abilities.ext_comp_code); 3440 3441 error = sbuf_finish(buf); 3442 if (error) 3443 device_printf(dev, "Error finishing sbuf: %d\n", error); 3444 3445 sbuf_delete(buf); 3446 return (error); 3447 } 3448 3449 static int 3450 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 3451 { 3452 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3453 struct ixl_vsi *vsi = &pf->vsi; 3454 struct ixl_mac_filter *f; 3455 device_t dev = pf->dev; 3456 int error = 0, ftl_len = 0, ftl_counter = 0; 3457 3458 struct sbuf *buf; 3459 3460 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3461 if (!buf) { 3462 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3463 return (ENOMEM); 3464 } 3465 3466 sbuf_printf(buf, "\n"); 3467 3468 /* Print MAC filters */ 3469 sbuf_printf(buf, "PF Filters:\n"); 3470 LIST_FOREACH(f, &vsi->ftl, ftle) 3471 ftl_len++; 3472 3473 if (ftl_len < 1) 3474 sbuf_printf(buf, "(none)\n"); 3475 else { 3476 LIST_FOREACH(f, &vsi->ftl, ftle) { 3477 sbuf_printf(buf, 3478 MAC_FORMAT ", vlan %4d, flags %#06x", 3479 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3480 /* don't print '\n' for last entry */ 3481 if (++ftl_counter != ftl_len) 3482 sbuf_printf(buf, "\n"); 3483 } 3484 } 3485 3486 #ifdef PCI_IOV 3487 /* TODO: Give each VF its own filter list sysctl */ 3488 struct ixl_vf *vf; 3489 if (pf->num_vfs > 0) { 3490 sbuf_printf(buf, "\n\n"); 3491 for (int i = 0; i < pf->num_vfs; i++) { 3492 vf = &pf->vfs[i]; 3493 if (!(vf->vf_flags & VF_FLAG_ENABLED)) 3494 continue; 3495 3496 vsi = &vf->vsi; 3497 ftl_len = 0, ftl_counter = 0; 3498 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num); 3499 LIST_FOREACH(f, &vsi->ftl, ftle) 3500 ftl_len++; 3501 3502 if (ftl_len < 1) 3503 sbuf_printf(buf, "(none)\n"); 3504 else { 3505 LIST_FOREACH(f, &vsi->ftl, ftle) { 3506 sbuf_printf(buf, 3507 MAC_FORMAT ", vlan %4d, flags %#06x\n", 3508 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3509 } 3510 } 3511 } 3512 } 3513 #endif 3514 3515 error = sbuf_finish(buf); 3516 if (error) 3517 device_printf(dev, "Error finishing sbuf: %d\n", error); 3518 sbuf_delete(buf); 3519 3520 return (error); 3521 } 3522 3523 #define IXL_SW_RES_SIZE 0x14 3524 int 3525 ixl_res_alloc_cmp(const void *a, const void *b) 3526 { 3527 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; 3528 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; 3529 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; 3530 3531 return ((int)one->resource_type - (int)two->resource_type); 3532 } 3533 3534 /* 3535 * Longest string length: 25 3536 */ 3537 const char * 3538 ixl_switch_res_type_string(u8 type) 3539 { 3540 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = { 3541 "VEB", 3542 "VSI", 3543 "Perfect Match MAC address", 3544 "S-tag", 3545 "(Reserved)", 3546 "Multicast hash entry", 3547 "Unicast hash entry", 3548 "VLAN", 3549 "VSI List entry", 3550 "(Reserved)", 3551 "VLAN Statistic Pool", 3552 "Mirror Rule", 3553 "Queue Set", 3554 "Inner VLAN Forward filter", 3555 "(Reserved)", 3556 "Inner MAC", 3557 "IP", 3558 "GRE/VN1 Key", 3559 "VN2 Key", 3560 "Tunneling Port" 3561 }; 3562 3563 if (type < IXL_SW_RES_SIZE) 3564 return ixl_switch_res_type_strings[type]; 3565 else 3566 return "(Reserved)"; 3567 } 3568 3569 static int 3570 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) 3571 { 3572 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3573 struct i40e_hw *hw = &pf->hw; 3574 device_t dev = pf->dev; 3575 struct sbuf *buf; 3576 enum i40e_status_code status; 3577 int error = 0; 3578 3579 u8 num_entries; 3580 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; 3581 3582 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3583 if (!buf) { 3584 device_printf(dev, "Could not allocate sbuf for output.\n"); 3585 return (ENOMEM); 3586 } 3587 3588 bzero(resp, sizeof(resp)); 3589 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, 3590 resp, 3591 IXL_SW_RES_SIZE, 3592 NULL); 3593 if (status) { 3594 device_printf(dev, 3595 "%s: get_switch_resource_alloc() error %s, aq error %s\n", 3596 __func__, i40e_stat_str(hw, status), 3597 i40e_aq_str(hw, hw->aq.asq_last_status)); 3598 sbuf_delete(buf); 3599 return (error); 3600 } 3601 3602 /* Sort entries by type for display */ 3603 qsort(resp, num_entries, 3604 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), 3605 &ixl_res_alloc_cmp); 3606 3607 sbuf_cat(buf, "\n"); 3608 sbuf_printf(buf, "# of entries: %d\n", num_entries); 3609 sbuf_printf(buf, 3610 " Type | Guaranteed | Total | Used | Un-allocated\n" 3611 " | (this) | (all) | (this) | (all) \n"); 3612 for (int i = 0; i < num_entries; i++) { 3613 sbuf_printf(buf, 3614 "%25s | %10d %5d %6d %12d", 3615 ixl_switch_res_type_string(resp[i].resource_type), 3616 resp[i].guaranteed, 3617 resp[i].total, 3618 resp[i].used, 3619 resp[i].total_unalloced); 3620 if (i < num_entries - 1) 3621 sbuf_cat(buf, "\n"); 3622 } 3623 3624 error = sbuf_finish(buf); 3625 if (error) 3626 device_printf(dev, "Error finishing sbuf: %d\n", error); 3627 3628 sbuf_delete(buf); 3629 return (error); 3630 } 3631 3632 enum ixl_sw_seid_offset { 3633 IXL_SW_SEID_EMP = 1, 3634 IXL_SW_SEID_MAC_START = 2, 3635 IXL_SW_SEID_MAC_END = 5, 3636 IXL_SW_SEID_PF_START = 16, 3637 IXL_SW_SEID_PF_END = 31, 3638 IXL_SW_SEID_VF_START = 32, 3639 IXL_SW_SEID_VF_END = 159, 3640 }; 3641 3642 /* 3643 * Caller must init and delete sbuf; this function will clear and 3644 * finish it for caller. 3645 * 3646 * Note: The SEID argument only applies for elements defined by FW at 3647 * power-on; these include the EMP, Ports, PFs and VFs. 3648 */ 3649 static char * 3650 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid) 3651 { 3652 sbuf_clear(s); 3653 3654 /* If SEID is in certain ranges, then we can infer the 3655 * mapping of SEID to switch element. 3656 */ 3657 if (seid == IXL_SW_SEID_EMP) { 3658 sbuf_cat(s, "EMP"); 3659 goto out; 3660 } else if (seid >= IXL_SW_SEID_MAC_START && 3661 seid <= IXL_SW_SEID_MAC_END) { 3662 sbuf_printf(s, "MAC %2d", 3663 seid - IXL_SW_SEID_MAC_START); 3664 goto out; 3665 } else if (seid >= IXL_SW_SEID_PF_START && 3666 seid <= IXL_SW_SEID_PF_END) { 3667 sbuf_printf(s, "PF %3d", 3668 seid - IXL_SW_SEID_PF_START); 3669 goto out; 3670 } else if (seid >= IXL_SW_SEID_VF_START && 3671 seid <= IXL_SW_SEID_VF_END) { 3672 sbuf_printf(s, "VF %3d", 3673 seid - IXL_SW_SEID_VF_START); 3674 goto out; 3675 } 3676 3677 switch (element_type) { 3678 case I40E_AQ_SW_ELEM_TYPE_BMC: 3679 sbuf_cat(s, "BMC"); 3680 break; 3681 case I40E_AQ_SW_ELEM_TYPE_PV: 3682 sbuf_cat(s, "PV"); 3683 break; 3684 case I40E_AQ_SW_ELEM_TYPE_VEB: 3685 sbuf_cat(s, "VEB"); 3686 break; 3687 case I40E_AQ_SW_ELEM_TYPE_PA: 3688 sbuf_cat(s, "PA"); 3689 break; 3690 case I40E_AQ_SW_ELEM_TYPE_VSI: 3691 sbuf_printf(s, "VSI"); 3692 break; 3693 default: 3694 sbuf_cat(s, "?"); 3695 break; 3696 } 3697 3698 out: 3699 sbuf_finish(s); 3700 return sbuf_data(s); 3701 } 3702 3703 static int 3704 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b) 3705 { 3706 const struct i40e_aqc_switch_config_element_resp *one, *two; 3707 one = (const struct i40e_aqc_switch_config_element_resp *)a; 3708 two = (const struct i40e_aqc_switch_config_element_resp *)b; 3709 3710 return ((int)one->seid - (int)two->seid); 3711 } 3712 3713 static int 3714 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) 3715 { 3716 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3717 struct i40e_hw *hw = &pf->hw; 3718 device_t dev = pf->dev; 3719 struct sbuf *buf; 3720 struct sbuf *nmbuf; 3721 enum i40e_status_code status; 3722 int error = 0; 3723 u16 next = 0; 3724 u8 aq_buf[I40E_AQ_LARGE_BUF]; 3725 3726 struct i40e_aqc_switch_config_element_resp *elem; 3727 struct i40e_aqc_get_switch_config_resp *sw_config; 3728 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 3729 3730 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3731 if (!buf) { 3732 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3733 return (ENOMEM); 3734 } 3735 3736 status = i40e_aq_get_switch_config(hw, sw_config, 3737 sizeof(aq_buf), &next, NULL); 3738 if (status) { 3739 device_printf(dev, 3740 "%s: aq_get_switch_config() error %s, aq error %s\n", 3741 __func__, i40e_stat_str(hw, status), 3742 i40e_aq_str(hw, hw->aq.asq_last_status)); 3743 sbuf_delete(buf); 3744 return error; 3745 } 3746 if (next) 3747 device_printf(dev, "%s: TODO: get more config with SEID %d\n", 3748 __func__, next); 3749 3750 nmbuf = sbuf_new_auto(); 3751 if (!nmbuf) { 3752 device_printf(dev, "Could not allocate sbuf for name output.\n"); 3753 sbuf_delete(buf); 3754 return (ENOMEM); 3755 } 3756 3757 /* Sort entries by SEID for display */ 3758 qsort(sw_config->element, sw_config->header.num_reported, 3759 sizeof(struct i40e_aqc_switch_config_element_resp), 3760 &ixl_sw_cfg_elem_seid_cmp); 3761 3762 sbuf_cat(buf, "\n"); 3763 /* Assuming <= 255 elements in switch */ 3764 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); 3765 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); 3766 /* Exclude: 3767 * Revision -- all elements are revision 1 for now 3768 */ 3769 sbuf_printf(buf, 3770 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n" 3771 " | | | (uplink)\n"); 3772 for (int i = 0; i < sw_config->header.num_reported; i++) { 3773 elem = &sw_config->element[i]; 3774 3775 // "%4d (%8s) | %8s %8s %#8x", 3776 sbuf_printf(buf, "%4d", elem->seid); 3777 sbuf_cat(buf, " "); 3778 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3779 elem->element_type, elem->seid)); 3780 sbuf_cat(buf, " | "); 3781 sbuf_printf(buf, "%4d", elem->uplink_seid); 3782 sbuf_cat(buf, " "); 3783 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3784 0, elem->uplink_seid)); 3785 sbuf_cat(buf, " | "); 3786 sbuf_printf(buf, "%4d", elem->downlink_seid); 3787 sbuf_cat(buf, " "); 3788 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3789 0, elem->downlink_seid)); 3790 sbuf_cat(buf, " | "); 3791 sbuf_printf(buf, "%8d", elem->connection_type); 3792 if (i < sw_config->header.num_reported - 1) 3793 sbuf_cat(buf, "\n"); 3794 } 3795 sbuf_delete(nmbuf); 3796 3797 error = sbuf_finish(buf); 3798 if (error) 3799 device_printf(dev, "Error finishing sbuf: %d\n", error); 3800 3801 sbuf_delete(buf); 3802 3803 return (error); 3804 } 3805 3806 static int 3807 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS) 3808 { 3809 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3810 struct i40e_hw *hw = &pf->hw; 3811 device_t dev = pf->dev; 3812 int requested_vlan = -1; 3813 enum i40e_status_code status = 0; 3814 int error = 0; 3815 3816 error = sysctl_handle_int(oidp, &requested_vlan, 0, req); 3817 if ((error) || (req->newptr == NULL)) 3818 return (error); 3819 3820 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) { 3821 device_printf(dev, "Flags disallow setting of vlans\n"); 3822 return (ENODEV); 3823 } 3824 3825 hw->switch_tag = requested_vlan; 3826 device_printf(dev, 3827 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n", 3828 hw->switch_tag, hw->first_tag, hw->second_tag); 3829 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL); 3830 if (status) { 3831 device_printf(dev, 3832 "%s: aq_set_switch_config() error %s, aq error %s\n", 3833 __func__, i40e_stat_str(hw, status), 3834 i40e_aq_str(hw, hw->aq.asq_last_status)); 3835 return (status); 3836 } 3837 return (0); 3838 } 3839 3840 static int 3841 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) 3842 { 3843 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3844 struct i40e_hw *hw = &pf->hw; 3845 device_t dev = pf->dev; 3846 struct sbuf *buf; 3847 int error = 0; 3848 enum i40e_status_code status; 3849 u32 reg; 3850 3851 struct i40e_aqc_get_set_rss_key_data key_data; 3852 3853 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3854 if (!buf) { 3855 device_printf(dev, "Could not allocate sbuf for output.\n"); 3856 return (ENOMEM); 3857 } 3858 3859 bzero(&key_data, sizeof(key_data)); 3860 3861 sbuf_cat(buf, "\n"); 3862 if (hw->mac.type == I40E_MAC_X722) { 3863 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); 3864 if (status) 3865 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", 3866 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3867 } else { 3868 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 3869 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 3870 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); 3871 } 3872 } 3873 3874 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); 3875 3876 error = sbuf_finish(buf); 3877 if (error) 3878 device_printf(dev, "Error finishing sbuf: %d\n", error); 3879 sbuf_delete(buf); 3880 3881 return (error); 3882 } 3883 3884 static void 3885 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) 3886 { 3887 int i, j, k, width; 3888 char c; 3889 3890 if (length < 1 || buf == NULL) return; 3891 3892 int byte_stride = 16; 3893 int lines = length / byte_stride; 3894 int rem = length % byte_stride; 3895 if (rem > 0) 3896 lines++; 3897 3898 for (i = 0; i < lines; i++) { 3899 width = (rem > 0 && i == lines - 1) 3900 ? rem : byte_stride; 3901 3902 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); 3903 3904 for (j = 0; j < width; j++) 3905 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); 3906 3907 if (width < byte_stride) { 3908 for (k = 0; k < (byte_stride - width); k++) 3909 sbuf_printf(sb, " "); 3910 } 3911 3912 if (!text) { 3913 sbuf_printf(sb, "\n"); 3914 continue; 3915 } 3916 3917 for (j = 0; j < width; j++) { 3918 c = (char)buf[i * byte_stride + j]; 3919 if (c < 32 || c > 126) 3920 sbuf_printf(sb, "."); 3921 else 3922 sbuf_printf(sb, "%c", c); 3923 3924 if (j == width - 1) 3925 sbuf_printf(sb, "\n"); 3926 } 3927 } 3928 } 3929 3930 static int 3931 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) 3932 { 3933 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3934 struct i40e_hw *hw = &pf->hw; 3935 device_t dev = pf->dev; 3936 struct sbuf *buf; 3937 int error = 0; 3938 enum i40e_status_code status; 3939 u8 hlut[512]; 3940 u32 reg; 3941 3942 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3943 if (!buf) { 3944 device_printf(dev, "Could not allocate sbuf for output.\n"); 3945 return (ENOMEM); 3946 } 3947 3948 bzero(hlut, sizeof(hlut)); 3949 sbuf_cat(buf, "\n"); 3950 if (hw->mac.type == I40E_MAC_X722) { 3951 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); 3952 if (status) 3953 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", 3954 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3955 } else { 3956 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { 3957 reg = rd32(hw, I40E_PFQF_HLUT(i)); 3958 bcopy(®, &hlut[i << 2], 4); 3959 } 3960 } 3961 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); 3962 3963 error = sbuf_finish(buf); 3964 if (error) 3965 device_printf(dev, "Error finishing sbuf: %d\n", error); 3966 sbuf_delete(buf); 3967 3968 return (error); 3969 } 3970 3971 static int 3972 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) 3973 { 3974 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3975 struct i40e_hw *hw = &pf->hw; 3976 u64 hena; 3977 3978 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 3979 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 3980 3981 return sysctl_handle_long(oidp, NULL, hena, req); 3982 } 3983 3984 /* 3985 * Sysctl to disable firmware's link management 3986 * 3987 * 1 - Disable link management on this port 3988 * 0 - Re-enable link management 3989 * 3990 * On normal NVMs, firmware manages link by default. 3991 */ 3992 static int 3993 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) 3994 { 3995 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3996 struct i40e_hw *hw = &pf->hw; 3997 device_t dev = pf->dev; 3998 int requested_mode = -1; 3999 enum i40e_status_code status = 0; 4000 int error = 0; 4001 4002 /* Read in new mode */ 4003 error = sysctl_handle_int(oidp, &requested_mode, 0, req); 4004 if ((error) || (req->newptr == NULL)) 4005 return (error); 4006 /* Check for sane value */ 4007 if (requested_mode < 0 || requested_mode > 1) { 4008 device_printf(dev, "Valid modes are 0 or 1\n"); 4009 return (EINVAL); 4010 } 4011 4012 /* Set new mode */ 4013 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); 4014 if (status) { 4015 device_printf(dev, 4016 "%s: Error setting new phy debug mode %s," 4017 " aq error: %s\n", __func__, i40e_stat_str(hw, status), 4018 i40e_aq_str(hw, hw->aq.asq_last_status)); 4019 return (EIO); 4020 } 4021 4022 return (0); 4023 } 4024 4025 /* 4026 * Read some diagnostic data from a (Q)SFP+ module 4027 * 4028 * SFP A2 QSFP Lower Page 4029 * Temperature 96-97 22-23 4030 * Vcc 98-99 26-27 4031 * TX power 102-103 34-35..40-41 4032 * RX power 104-105 50-51..56-57 4033 */ 4034 static int 4035 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 4036 { 4037 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4038 device_t dev = pf->dev; 4039 struct sbuf *sbuf; 4040 int error = 0; 4041 u8 output; 4042 4043 if (req->oldptr == NULL) { 4044 error = SYSCTL_OUT(req, 0, 128); 4045 return (0); 4046 } 4047 4048 error = pf->read_i2c_byte(pf, 0, 0xA0, &output); 4049 if (error) { 4050 device_printf(dev, "Error reading from i2c\n"); 4051 return (error); 4052 } 4053 4054 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 4055 if (output == 0x3) { 4056 /* 4057 * Check for: 4058 * - Internally calibrated data 4059 * - Diagnostic monitoring is implemented 4060 */ 4061 pf->read_i2c_byte(pf, 92, 0xA0, &output); 4062 if (!(output & 0x60)) { 4063 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); 4064 return (0); 4065 } 4066 4067 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4068 4069 for (u8 offset = 96; offset < 100; offset++) { 4070 pf->read_i2c_byte(pf, offset, 0xA2, &output); 4071 sbuf_printf(sbuf, "%02X ", output); 4072 } 4073 for (u8 offset = 102; offset < 106; offset++) { 4074 pf->read_i2c_byte(pf, offset, 0xA2, &output); 4075 sbuf_printf(sbuf, "%02X ", output); 4076 } 4077 } else if (output == 0xD || output == 0x11) { 4078 /* 4079 * QSFP+ modules are always internally calibrated, and must indicate 4080 * what types of diagnostic monitoring are implemented 4081 */ 4082 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4083 4084 for (u8 offset = 22; offset < 24; offset++) { 4085 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4086 sbuf_printf(sbuf, "%02X ", output); 4087 } 4088 for (u8 offset = 26; offset < 28; offset++) { 4089 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4090 sbuf_printf(sbuf, "%02X ", output); 4091 } 4092 /* Read the data from the first lane */ 4093 for (u8 offset = 34; offset < 36; offset++) { 4094 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4095 sbuf_printf(sbuf, "%02X ", output); 4096 } 4097 for (u8 offset = 50; offset < 52; offset++) { 4098 pf->read_i2c_byte(pf, offset, 0xA0, &output); 4099 sbuf_printf(sbuf, "%02X ", output); 4100 } 4101 } else { 4102 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); 4103 return (0); 4104 } 4105 4106 sbuf_finish(sbuf); 4107 sbuf_delete(sbuf); 4108 4109 return (0); 4110 } 4111 4112 /* 4113 * Sysctl to read a byte from I2C bus. 4114 * 4115 * Input: 32-bit value: 4116 * bits 0-7: device address (0xA0 or 0xA2) 4117 * bits 8-15: offset (0-255) 4118 * bits 16-31: unused 4119 * Output: 8-bit value read 4120 */ 4121 static int 4122 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) 4123 { 4124 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4125 device_t dev = pf->dev; 4126 int input = -1, error = 0; 4127 u8 dev_addr, offset, output; 4128 4129 /* Read in I2C read parameters */ 4130 error = sysctl_handle_int(oidp, &input, 0, req); 4131 if ((error) || (req->newptr == NULL)) 4132 return (error); 4133 /* Validate device address */ 4134 dev_addr = input & 0xFF; 4135 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4136 return (EINVAL); 4137 } 4138 offset = (input >> 8) & 0xFF; 4139 4140 error = pf->read_i2c_byte(pf, offset, dev_addr, &output); 4141 if (error) 4142 return (error); 4143 4144 device_printf(dev, "%02X\n", output); 4145 return (0); 4146 } 4147 4148 /* 4149 * Sysctl to write a byte to the I2C bus. 4150 * 4151 * Input: 32-bit value: 4152 * bits 0-7: device address (0xA0 or 0xA2) 4153 * bits 8-15: offset (0-255) 4154 * bits 16-23: value to write 4155 * bits 24-31: unused 4156 * Output: 8-bit value written 4157 */ 4158 static int 4159 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) 4160 { 4161 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4162 device_t dev = pf->dev; 4163 int input = -1, error = 0; 4164 u8 dev_addr, offset, value; 4165 4166 /* Read in I2C write parameters */ 4167 error = sysctl_handle_int(oidp, &input, 0, req); 4168 if ((error) || (req->newptr == NULL)) 4169 return (error); 4170 /* Validate device address */ 4171 dev_addr = input & 0xFF; 4172 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4173 return (EINVAL); 4174 } 4175 offset = (input >> 8) & 0xFF; 4176 value = (input >> 16) & 0xFF; 4177 4178 error = pf->write_i2c_byte(pf, offset, dev_addr, value); 4179 if (error) 4180 return (error); 4181 4182 device_printf(dev, "%02X written\n", value); 4183 return (0); 4184 } 4185 4186 static int 4187 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4188 u8 bit_pos, int *is_set) 4189 { 4190 device_t dev = pf->dev; 4191 struct i40e_hw *hw = &pf->hw; 4192 enum i40e_status_code status; 4193 4194 if (IXL_PF_IN_RECOVERY_MODE(pf)) 4195 return (EIO); 4196 4197 status = i40e_aq_get_phy_capabilities(hw, 4198 FALSE, FALSE, abilities, NULL); 4199 if (status) { 4200 device_printf(dev, 4201 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 4202 __func__, i40e_stat_str(hw, status), 4203 i40e_aq_str(hw, hw->aq.asq_last_status)); 4204 return (EIO); 4205 } 4206 4207 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); 4208 return (0); 4209 } 4210 4211 static int 4212 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4213 u8 bit_pos, int set) 4214 { 4215 device_t dev = pf->dev; 4216 struct i40e_hw *hw = &pf->hw; 4217 struct i40e_aq_set_phy_config config; 4218 enum i40e_status_code status; 4219 4220 /* Set new PHY config */ 4221 memset(&config, 0, sizeof(config)); 4222 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); 4223 if (set) 4224 config.fec_config |= bit_pos; 4225 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { 4226 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 4227 config.phy_type = abilities->phy_type; 4228 config.phy_type_ext = abilities->phy_type_ext; 4229 config.link_speed = abilities->link_speed; 4230 config.eee_capability = abilities->eee_capability; 4231 config.eeer = abilities->eeer_val; 4232 config.low_power_ctrl = abilities->d3_lpan; 4233 status = i40e_aq_set_phy_config(hw, &config, NULL); 4234 4235 if (status) { 4236 device_printf(dev, 4237 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", 4238 __func__, i40e_stat_str(hw, status), 4239 i40e_aq_str(hw, hw->aq.asq_last_status)); 4240 return (EIO); 4241 } 4242 } 4243 4244 return (0); 4245 } 4246 4247 static int 4248 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) 4249 { 4250 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4251 int mode, error = 0; 4252 4253 struct i40e_aq_get_phy_abilities_resp abilities; 4254 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); 4255 if (error) 4256 return (error); 4257 /* Read in new mode */ 4258 error = sysctl_handle_int(oidp, &mode, 0, req); 4259 if ((error) || (req->newptr == NULL)) 4260 return (error); 4261 4262 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); 4263 } 4264 4265 static int 4266 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) 4267 { 4268 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4269 int mode, error = 0; 4270 4271 struct i40e_aq_get_phy_abilities_resp abilities; 4272 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); 4273 if (error) 4274 return (error); 4275 /* Read in new mode */ 4276 error = sysctl_handle_int(oidp, &mode, 0, req); 4277 if ((error) || (req->newptr == NULL)) 4278 return (error); 4279 4280 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); 4281 } 4282 4283 static int 4284 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) 4285 { 4286 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4287 int mode, error = 0; 4288 4289 struct i40e_aq_get_phy_abilities_resp abilities; 4290 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); 4291 if (error) 4292 return (error); 4293 /* Read in new mode */ 4294 error = sysctl_handle_int(oidp, &mode, 0, req); 4295 if ((error) || (req->newptr == NULL)) 4296 return (error); 4297 4298 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); 4299 } 4300 4301 static int 4302 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) 4303 { 4304 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4305 int mode, error = 0; 4306 4307 struct i40e_aq_get_phy_abilities_resp abilities; 4308 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); 4309 if (error) 4310 return (error); 4311 /* Read in new mode */ 4312 error = sysctl_handle_int(oidp, &mode, 0, req); 4313 if ((error) || (req->newptr == NULL)) 4314 return (error); 4315 4316 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); 4317 } 4318 4319 static int 4320 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) 4321 { 4322 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4323 int mode, error = 0; 4324 4325 struct i40e_aq_get_phy_abilities_resp abilities; 4326 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); 4327 if (error) 4328 return (error); 4329 /* Read in new mode */ 4330 error = sysctl_handle_int(oidp, &mode, 0, req); 4331 if ((error) || (req->newptr == NULL)) 4332 return (error); 4333 4334 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); 4335 } 4336 4337 static int 4338 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) 4339 { 4340 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4341 struct i40e_hw *hw = &pf->hw; 4342 device_t dev = pf->dev; 4343 struct sbuf *buf; 4344 int error = 0; 4345 enum i40e_status_code status; 4346 4347 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4348 if (!buf) { 4349 device_printf(dev, "Could not allocate sbuf for output.\n"); 4350 return (ENOMEM); 4351 } 4352 4353 u8 *final_buff; 4354 /* This amount is only necessary if reading the entire cluster into memory */ 4355 #define IXL_FINAL_BUFF_SIZE (1280 * 1024) 4356 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT); 4357 if (final_buff == NULL) { 4358 device_printf(dev, "Could not allocate memory for output.\n"); 4359 goto out; 4360 } 4361 int final_buff_len = 0; 4362 4363 u8 cluster_id = 1; 4364 bool more = true; 4365 4366 u8 dump_buf[4096]; 4367 u16 curr_buff_size = 4096; 4368 u8 curr_next_table = 0; 4369 u32 curr_next_index = 0; 4370 4371 u16 ret_buff_size; 4372 u8 ret_next_table; 4373 u32 ret_next_index; 4374 4375 sbuf_cat(buf, "\n"); 4376 4377 while (more) { 4378 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size, 4379 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL); 4380 if (status) { 4381 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n", 4382 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 4383 goto free_out; 4384 } 4385 4386 /* copy info out of temp buffer */ 4387 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size); 4388 final_buff_len += ret_buff_size; 4389 4390 if (ret_next_table != curr_next_table) { 4391 /* We're done with the current table; we can dump out read data. */ 4392 sbuf_printf(buf, "%d:", curr_next_table); 4393 int bytes_printed = 0; 4394 while (bytes_printed <= final_buff_len) { 4395 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), ""); 4396 bytes_printed += 16; 4397 } 4398 sbuf_cat(buf, "\n"); 4399 4400 /* The entire cluster has been read; we're finished */ 4401 if (ret_next_table == 0xFF) 4402 break; 4403 4404 /* Otherwise clear the output buffer and continue reading */ 4405 bzero(final_buff, IXL_FINAL_BUFF_SIZE); 4406 final_buff_len = 0; 4407 } 4408 4409 if (ret_next_index == 0xFFFFFFFF) 4410 ret_next_index = 0; 4411 4412 bzero(dump_buf, sizeof(dump_buf)); 4413 curr_next_table = ret_next_table; 4414 curr_next_index = ret_next_index; 4415 } 4416 4417 free_out: 4418 free(final_buff, M_IXL); 4419 out: 4420 error = sbuf_finish(buf); 4421 if (error) 4422 device_printf(dev, "Error finishing sbuf: %d\n", error); 4423 sbuf_delete(buf); 4424 4425 return (error); 4426 } 4427 4428 static int 4429 ixl_start_fw_lldp(struct ixl_pf *pf) 4430 { 4431 struct i40e_hw *hw = &pf->hw; 4432 enum i40e_status_code status; 4433 4434 status = i40e_aq_start_lldp(hw, false, NULL); 4435 if (status != I40E_SUCCESS) { 4436 switch (hw->aq.asq_last_status) { 4437 case I40E_AQ_RC_EEXIST: 4438 device_printf(pf->dev, 4439 "FW LLDP agent is already running\n"); 4440 break; 4441 case I40E_AQ_RC_EPERM: 4442 device_printf(pf->dev, 4443 "Device configuration forbids SW from starting " 4444 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII " 4445 "attribute to \"Enabled\" to use this sysctl\n"); 4446 return (EINVAL); 4447 default: 4448 device_printf(pf->dev, 4449 "Starting FW LLDP agent failed: error: %s, %s\n", 4450 i40e_stat_str(hw, status), 4451 i40e_aq_str(hw, hw->aq.asq_last_status)); 4452 return (EINVAL); 4453 } 4454 } 4455 4456 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4457 return (0); 4458 } 4459 4460 static int 4461 ixl_stop_fw_lldp(struct ixl_pf *pf) 4462 { 4463 struct i40e_hw *hw = &pf->hw; 4464 device_t dev = pf->dev; 4465 enum i40e_status_code status; 4466 4467 if (hw->func_caps.npar_enable != 0) { 4468 device_printf(dev, 4469 "Disabling FW LLDP agent is not supported on this device\n"); 4470 return (EINVAL); 4471 } 4472 4473 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 4474 device_printf(dev, 4475 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); 4476 return (EINVAL); 4477 } 4478 4479 status = i40e_aq_stop_lldp(hw, true, false, NULL); 4480 if (status != I40E_SUCCESS) { 4481 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 4482 device_printf(dev, 4483 "Disabling FW LLDP agent failed: error: %s, %s\n", 4484 i40e_stat_str(hw, status), 4485 i40e_aq_str(hw, hw->aq.asq_last_status)); 4486 return (EINVAL); 4487 } 4488 4489 device_printf(dev, "FW LLDP agent is already stopped\n"); 4490 } 4491 4492 i40e_aq_set_dcb_parameters(hw, true, NULL); 4493 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4494 return (0); 4495 } 4496 4497 static int 4498 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 4499 { 4500 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4501 int state, new_state, error = 0; 4502 4503 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); 4504 4505 /* Read in new mode */ 4506 error = sysctl_handle_int(oidp, &new_state, 0, req); 4507 if ((error) || (req->newptr == NULL)) 4508 return (error); 4509 4510 /* Already in requested state */ 4511 if (new_state == state) 4512 return (error); 4513 4514 if (new_state == 0) 4515 return ixl_stop_fw_lldp(pf); 4516 4517 return ixl_start_fw_lldp(pf); 4518 } 4519 4520 static int 4521 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4522 { 4523 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4524 int state, new_state; 4525 int sysctl_handle_status = 0; 4526 enum i40e_status_code cmd_status; 4527 4528 /* Init states' values */ 4529 state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED)); 4530 4531 /* Get requested mode */ 4532 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); 4533 if ((sysctl_handle_status) || (req->newptr == NULL)) 4534 return (sysctl_handle_status); 4535 4536 /* Check if state has changed */ 4537 if (new_state == state) 4538 return (0); 4539 4540 /* Set new state */ 4541 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); 4542 4543 /* Save new state or report error */ 4544 if (!cmd_status) { 4545 if (new_state == 0) 4546 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4547 else 4548 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4549 } else if (cmd_status == I40E_ERR_CONFIG) 4550 return (EPERM); 4551 else 4552 return (EIO); 4553 4554 return (0); 4555 } 4556 4557 static int 4558 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS) 4559 { 4560 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4561 int error, state; 4562 4563 state = !!(atomic_load_acq_32(&pf->state) & 4564 IXL_PF_STATE_LINK_ACTIVE_ON_DOWN); 4565 4566 error = sysctl_handle_int(oidp, &state, 0, req); 4567 if ((error) || (req->newptr == NULL)) 4568 return (error); 4569 4570 if (state == 0) 4571 atomic_clear_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN); 4572 else 4573 atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN); 4574 4575 return (0); 4576 } 4577 4578 4579 int 4580 ixl_attach_get_link_status(struct ixl_pf *pf) 4581 { 4582 struct i40e_hw *hw = &pf->hw; 4583 device_t dev = pf->dev; 4584 int error = 0; 4585 4586 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 4587 (hw->aq.fw_maj_ver < 4)) { 4588 i40e_msec_delay(75); 4589 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); 4590 if (error) { 4591 device_printf(dev, "link restart failed, aq_err=%d\n", 4592 pf->hw.aq.asq_last_status); 4593 return error; 4594 } 4595 } 4596 4597 /* Determine link state */ 4598 hw->phy.get_link_info = TRUE; 4599 i40e_get_link_status(hw, &pf->link_up); 4600 4601 /* Flow Control mode not set by user, read current FW settings */ 4602 if (pf->fc == -1) 4603 pf->fc = hw->fc.current_mode; 4604 4605 return (0); 4606 } 4607 4608 static int 4609 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS) 4610 { 4611 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4612 int requested = 0, error = 0; 4613 4614 /* Read in new mode */ 4615 error = sysctl_handle_int(oidp, &requested, 0, req); 4616 if ((error) || (req->newptr == NULL)) 4617 return (error); 4618 4619 /* Initiate the PF reset later in the admin task */ 4620 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ); 4621 4622 return (error); 4623 } 4624 4625 static int 4626 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS) 4627 { 4628 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4629 struct i40e_hw *hw = &pf->hw; 4630 int requested = 0, error = 0; 4631 4632 /* Read in new mode */ 4633 error = sysctl_handle_int(oidp, &requested, 0, req); 4634 if ((error) || (req->newptr == NULL)) 4635 return (error); 4636 4637 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 4638 4639 return (error); 4640 } 4641 4642 static int 4643 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS) 4644 { 4645 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4646 struct i40e_hw *hw = &pf->hw; 4647 int requested = 0, error = 0; 4648 4649 /* Read in new mode */ 4650 error = sysctl_handle_int(oidp, &requested, 0, req); 4651 if ((error) || (req->newptr == NULL)) 4652 return (error); 4653 4654 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK); 4655 4656 return (error); 4657 } 4658 4659 /* 4660 * Print out mapping of TX queue indexes and Rx queue indexes 4661 * to MSI-X vectors. 4662 */ 4663 static int 4664 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS) 4665 { 4666 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4667 struct ixl_vsi *vsi = &pf->vsi; 4668 device_t dev = pf->dev; 4669 struct sbuf *buf; 4670 int error = 0; 4671 4672 struct ixl_rx_queue *rx_que = vsi->rx_queues; 4673 struct ixl_tx_queue *tx_que = vsi->tx_queues; 4674 4675 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4676 if (!buf) { 4677 device_printf(dev, "Could not allocate sbuf for output.\n"); 4678 return (ENOMEM); 4679 } 4680 4681 sbuf_cat(buf, "\n"); 4682 for (int i = 0; i < vsi->num_rx_queues; i++) { 4683 rx_que = &vsi->rx_queues[i]; 4684 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix); 4685 } 4686 for (int i = 0; i < vsi->num_tx_queues; i++) { 4687 tx_que = &vsi->tx_queues[i]; 4688 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix); 4689 } 4690 4691 error = sbuf_finish(buf); 4692 if (error) 4693 device_printf(dev, "Error finishing sbuf: %d\n", error); 4694 sbuf_delete(buf); 4695 4696 return (error); 4697 } 4698