/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);

/* Sysctls */
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}
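/*
 * Example output (values illustrative only), matching the format string
 * used by ixl_nvm_version_str() above:
 *
 *   ixl0: fw 6.0.48442 api 1.7 nvm 6.01 etid 80002d34 oem 1.262.0
 */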
/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify state of FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else {
		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
			fw_mode = IXL_FW_MODE_UEMPR;
	}
	return (fw_mode);
}

/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success, or an error code on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/* Check FW mode again in case it has changed while
	 * waiting for reset to complete */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}

/**
 * ixl_setup_hmc - Setup LAN Host Memory Cache
 * @pf: PF structure
 *
 * Init and configure LAN Host Memory Cache
 *
 * @returns 0 on success, EIO on error
 */
int
ixl_setup_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	return (0);
}
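/*
 * Illustrative sketch (not driver code): a typical attach/rebuild path
 * pairs the helpers above, resetting the PF before setting up the HMC and
 * tearing the HMC back down (ixl_shutdown_hmc(), below) on later failure.
 * The function name here is hypothetical.
 */
#if 0
static int
ixl_example_reset_and_setup(struct ixl_pf *pf)
{
	int error;

	/* Returns 0 immediately if FW is in recovery mode */
	error = ixl_pf_reset(pf);
	if (error != 0)
		return (error);

	error = ixl_setup_hmc(pf);
	if (error != 0)
		return (error);

	/* ... continue init; call ixl_shutdown_hmc(pf) on failure ... */
	return (0);
}
#endif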
/**
 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
 * @pf: PF structure
 *
 * Shutdown Host Memory Cache if configured.
 */
void
ixl_shutdown_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/* HMC not configured, no need to shutdown */
	if (hw->hmc.hmc_obj == NULL)
		return;

	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(pf->dev,
		    "Shutdown LAN HMC failed with code %s\n",
		    i40e_stat_str(hw, status));
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		hw->func_caps.iwarp = 0;
		return (0);
	}

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_IXL);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case IXL_I2C_ACCESS_METHOD_AQ:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Keep link active by default */
	atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}
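/*
 * Distilled form of the retry idiom above (an illustrative sketch): any AQ
 * command that fails with I40E_AQ_RC_ENOMEM reports the required buffer
 * size through its "needed" out-parameter, so the caller can reallocate
 * once at the size FW asked for and repeat the command.
 */
#if 0
	len = 40 * sizeof(*buf);	/* initial guess */
retry:
	buf = malloc(len, M_IXL, M_NOWAIT | M_ZERO);
	status = i40e_aq_discover_capabilities(hw, buf, len, &needed,
	    i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_IXL);
	if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM && again) {
		again = FALSE;		/* retry only once */
		len = needed;		/* size reported by FW */
		goto retry;
	}
#endif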
/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
		    __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

/*
** Creates new filter with given MAC address and VLAN ID
*/
static struct ixl_mac_filter *
ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	/* create a new empty filter */
	f = malloc(sizeof(struct ixl_mac_filter),
	    M_IXL, M_NOWAIT | M_ZERO);
	if (f) {
		LIST_INSERT_HEAD(headp, f, ftle);
		bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
		f->vlan = vlan;
	}

	return (f);
}
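/*
 * Illustrative sketch: building a temporary list with ixl_new_filter() and
 * handing it to ixl_add_hw_filters() (defined later in this file). On an
 * error path the list could instead be discarded with ixl_free_filters().
 * The MAC address and function name are made-up example values.
 */
#if 0
static void
ixl_example_add_one_filter(struct ixl_vsi *vsi)
{
	struct ixl_ftl_head to_add;
	static const u8 mac[ETHER_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	LIST_INIT(&to_add);
	if (ixl_new_filter(&to_add, mac, IXL_VLAN_ANY) == NULL)
		return;		/* allocation failed */

	ixl_add_hw_filters(vsi, &to_add, 1);
}
#endif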
/**
 * ixl_free_filters - Free all filters in given list
 * headp - pointer to list head
 *
 * Frees memory used by each entry in the list.
 * Does not remove filters from HW.
 */
void
ixl_free_filters(struct ixl_ftl_head *headp)
{
	struct ixl_mac_filter *f, *nf;

	f = LIST_FIRST(headp);
	while (f != NULL) {
		nf = LIST_NEXT(f, ftle);
		free(f, M_IXL);
		f = nf;
	}

	LIST_INIT(headp);
}

static u_int
ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_add_maddr_arg *ama = arg;
	struct ixl_vsi *vsi = ama->vsi;
	const u8 *macaddr = (u8*)LLADDR(sdl);
	struct ixl_mac_filter *f;

	/* Does one already exist */
	f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
	if (f != NULL)
		return (0);

	f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
	if (f == NULL) {
		device_printf(vsi->dev, "WARNING: no filter available!!\n");
		return (0);
	}
	f->flags |= IXL_FILTER_MC;

	return (1);
}

/*********************************************************************
 *	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;
	int mcnt = 0;
	struct ixl_add_maddr_arg cb_arg;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	mcnt = if_llmaddr_count(ifp);
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		/* delete all existing MC filters */
		ixl_del_multi(vsi, true);
		return;
	}

	cb_arg.vsi = vsi;
	LIST_INIT(&cb_arg.to_add);

	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
	if (mcnt > 0)
		ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

static u_int
ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_mac_filter *f = arg;

	if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
		return (1);
	else
		return (0);
}

void
ixl_del_multi(struct ixl_vsi *vsi, bool all)
{
	struct ixl_ftl_head to_del;
	struct ifnet *ifp = vsi->ifp;
	struct ixl_mac_filter *f, *fn;
	int mcnt = 0;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	LIST_INIT(&to_del);
	/* Search for removed multicast addresses */
	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
		if ((f->flags & IXL_FILTER_MC) == 0 ||
		    (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		mcnt++;
	}

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, &to_del, mcnt);
}
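/*
 * Illustrative sketch: the multicast list is resynchronized by dropping
 * filters for addresses that left the interface list and adding filters
 * for new ones. The caller name is an assumption, not driver code.
 */
#if 0
static void
ixl_example_multi_set(struct ixl_vsi *vsi)
{
	ixl_del_multi(vsi, false);	/* remove stale MC filters */
	ixl_add_multi(vsi);		/* add filters for new addresses */
}
#endif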
void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_link_speed_string(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	    hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
	    ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
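/*
 * Illustrative sketch: converting a target interrupt interval in
 * microseconds into the 2-usec register granularity noted above.
 */
#if 0
	u32 usecs = 124;					/* desired interval */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), usecs / 2);	/* 0x3E */
#endif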
void
ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}
/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}

void
ixl_vsi_add_sysctls(struct ixl_vsi *vsi, const char *sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	/* Copy of netstat RX errors counter for validation purposes */
	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
	    CTLFLAG_RD, &vsi->ierrors,
	    "RX packet errors");

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}
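/*
 * Illustrative userland sketch (not kernel code): the handler above backs
 * a per-device sysctl, so the ITR can be set with sysctlbyname(3). The
 * OID below assumes unit 0, and the value is a hypothetical example.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

	int itr = 122;
	sysctlbyname("dev.ixl.0.tx_itr", NULL, NULL, &itr, sizeof(itr));
#endif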
/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}

void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		/* 52 bytes: 40-byte standard RSS key + 12-byte extended hash key */
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}

/*
 * Configure enabled PCTYPES for RSS.
 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64 set_hena = 0, hena;

#ifdef RSS
	u32 rss_hash_config;

	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}

/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}
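/*
 * Illustrative sketch: HENA is a 64-bit mask of enabled packet classifier
 * types split across two 32-bit registers. Enabling only TCP/IPv4 and
 * UDP/IPv4 hashing would look like this (a sketch, not driver code).
 */
#if 0
	u64 hena = ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
#endif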
/*
 * In some firmware versions there is a default MAC/VLAN filter configured
 * that interferes with the filters managed by the driver.
 * Make sure it's removed.
 */
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
	struct i40e_aqc_remove_macvlan_element_data e;

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
	    I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	LIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	if (IXL_VSI_IS_VF(vsi))
		return;

	ixl_del_default_hw_filters(vsi);

	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}

void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_ftl_head tmp;
	int cnt;

	/*
	 * The ixl_add_hw_filters function adds filters configured
	 * in HW to a list in VSI. Move all filters to a temporary
	 * list to avoid corrupting it by concatenating to itself.
	 */
	LIST_INIT(&tmp);
	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
	cnt = vsi->num_hw_filters;
	vsi->num_hw_filters = 0;

	ixl_add_hw_filters(vsi, &tmp, cnt);

	/* Filter could be removed if MAC address was changed */
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * VLAN HW filtering is enabled, make sure that filters
	 * for all registered VLAN tags are configured
	 */
	ixl_add_vlan_filters(vsi, hw->mac.addr);
}
/*
 * This routine adds a MAC/VLAN filter to the software filter
 * list, then adds that new filter to the HW, unless the filter
 * already exists in the SW filter list.
 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_pf *pf;
	device_t dev;
	struct ixl_ftl_head to_add;
	int to_add_cnt;

	pf = vsi->back;
	dev = pf->dev;
	to_add_cnt = 1;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist */
	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f != NULL)
		return;

	LIST_INIT(&to_add);
	f = ixl_new_filter(&to_add, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/*
	** If this is the first vlan being registered, we need to
	** remove the ANY filter that indicates we are not in a vlan,
	** and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			struct ixl_ftl_head to_del;

			/* Prepare new filter first to avoid removing
			 * VLAN_ANY filter if allocation fails */
			f = ixl_new_filter(&to_add, macaddr, 0);
			if (f == NULL) {
				device_printf(dev, "WARNING: no filter available!!\n");
				free(LIST_FIRST(&to_add), M_IXL);
				return;
			}
			to_add_cnt++;

			LIST_REMOVE(tmp, ftle);
			LIST_INIT(&to_del);
			LIST_INSERT_HEAD(&to_del, tmp, ftle);
			ixl_del_hw_filters(vsi, &to_del, 1);
		}
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}
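/*
 * Illustrative sketch: a VLAN registration path only needs a single call;
 * ixl_add_filter() itself swaps the VLAN_ANY filter for a VLAN-0 filter
 * when the first VLAN is registered. Caller and variable names here are
 * assumptions, not driver code.
 */
#if 0
	/* vsi->num_vlans is assumed to be updated by the caller */
	ixl_add_filter(vsi, vsi->hw->mac.addr, vtag);
#endif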
/**
 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
 * @vsi: pointer to VSI
 * @macaddr: MAC address
 *
 * Adds a MAC/VLAN filter for each VLAN configured on the interface
 * if there are enough HW filters. Otherwise adds a single filter
 * for all tagged and untagged frames to allow all configured VLANs
 * to receive traffic.
 */
void
ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_ftl_head to_add;
	struct ixl_mac_filter *f;
	int to_add_cnt = 0;
	int i, vlan = 0;

	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
		return;
	}
	LIST_INIT(&to_add);

	/* Add filter for untagged frames if it does not exist yet */
	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (f == NULL) {
		f = ixl_new_filter(&to_add, macaddr, 0);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			return;
		}
		to_add_cnt++;
	}

	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
		if (vlan == -1)
			break;

		/* Does one already exist */
		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
		if (f != NULL)
			continue;

		f = ixl_new_filter(&to_add, macaddr, vlan);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			ixl_free_filters(&to_add);
			return;
		}
		to_add_cnt++;
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head ftl_head;
	int to_del_cnt = 1;

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f == NULL)
		return;

	LIST_REMOVE(f, ftle);
	LIST_INIT(&ftl_head);
	LIST_INSERT_HEAD(&ftl_head, f, ftle);
	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
		vsi->num_macs--;

	/* If this is not the last vlan just remove the filter */
	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
		return;
	}

	/* It's the last vlan, we need to switch back to a non-vlan filter */
	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (tmp != NULL) {
		LIST_REMOVE(tmp, ftle);
		LIST_INSERT_AFTER(f, tmp, ftle);
		to_del_cnt++;
	}
	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);

	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
}
/**
 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
 * @vsi: VSI which filters need to be removed
 * @macaddr: MAC address
 *
 * Remove all MAC/VLAN filters with a given MAC address. For multicast
 * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY),
 * so skip them to speed up processing. Those filters should be removed
 * using the ixl_del_filter function.
 */
void
ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head to_del;
	int to_del_cnt = 0;

	LIST_INIT(&to_del);

	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
		if ((f->flags & IXL_FILTER_MC) != 0 ||
		    !ixl_ether_is_equal(f->macaddr, macaddr))
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
	if (to_del_cnt > 0)
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
}

/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	LIST_FOREACH(f, headp, ftle) {
		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
		    (f->vlan == vlan)) {
			return (f);
		}
	}

	return (NULL);
}

/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter *f, *fn;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	LIST_FOREACH(f, to_add, ftle) {
		b = &a[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
		if (f->vlan == IXL_VLAN_ANY) {
			b->vlan_tag = 0;
			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
		} else {
			b->vlan_tag = f->vlan;
			b->flags = 0;
		}
		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		if (++j == cnt)
			break;
	}
	if (j != cnt) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: list of filters too short; expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_add);
		goto out_free;
	}

	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
	if (status == I40E_SUCCESS) {
		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
		vsi->num_hw_filters += j;
		goto out_free;
	}

	device_printf(dev,
	    "i40e_aq_add_macvlan status %s, error %s\n",
	    i40e_stat_str(hw, status),
	    i40e_aq_str(hw, hw->aq.asq_last_status));
	j = 0;

	/* Verify which filters were actually configured in HW
	 * and add them to the list */
	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
		LIST_REMOVE(f, ftle);
		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
			ixl_dbg_filter(pf,
			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
			    __func__,
			    MAC_FORMAT_ARGS(f->macaddr),
			    f->vlan);
			free(f, M_IXL);
		} else {
			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
			vsi->num_hw_filters++;
		}
		j++;
	}

out_free:
	free(a, M_IXL);
}

/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	struct ixl_mac_filter *f, *f_temp;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
		e = &d[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		if (f->vlan == IXL_VLAN_ANY) {
			e->vlan_tag = 0;
			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
		} else {
			e->vlan_tag = f->vlan;
		}

		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		/* delete entry from the list */
		LIST_REMOVE(f, ftle);
		free(f, M_IXL);
		if (++j == cnt)
			break;
	}
	if (j != cnt || !LIST_EMPTY(to_del)) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_del);
		goto out_free;
	}
	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		for (int i = 0; i < j; i++) {
			if (d[i].error_code == 0)
				continue;
			device_printf(dev,
			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
			    d[i].vlan_tag);
		}
	}

	vsi->num_hw_filters -= j;

out_free:
	free(d, M_IXL);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
}

int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}
int
ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
	    I40E_QRX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "RX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}
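/*
 * Illustrative sketch: bringing up every queue pair of a VSI by iterating
 * ixl_enable_ring(); the matching ixl_disable_ring() below is used the
 * same way on stop. Field and loop names are assumptions, not driver code.
 */
#if 0
	for (u16 q = 0; q < vsi->num_tx_queues; q++)
		if (ixl_enable_ring(pf, &pf->qtag, q) != 0)
			break;	/* helper already printed the error */
#endif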
/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
	i40e_usec_delay(500);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "TX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "RX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}

static void
ixl_handle_tx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 vf_num, queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	reg = rd32(hw, I40E_PF_MDET_TX);
	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
		    event, queue, pf_num, vf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
"TX Malicious Driver Detection event (unknown)\n"); 1764 } 1765 1766 static void 1767 ixl_handle_rx_mdd_event(struct ixl_pf *pf) 1768 { 1769 struct i40e_hw *hw = &pf->hw; 1770 device_t dev = pf->dev; 1771 struct ixl_vf *vf; 1772 bool mdd_detected = false; 1773 bool pf_mdd_detected = false; 1774 bool vf_mdd_detected = false; 1775 u16 queue; 1776 u8 pf_num, event; 1777 u8 pf_mdet_num, vp_mdet_num; 1778 u32 reg; 1779 1780 /* 1781 * GL_MDET_RX doesn't contain VF number information, unlike 1782 * GL_MDET_TX. 1783 */ 1784 reg = rd32(hw, I40E_GL_MDET_RX); 1785 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 1786 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 1787 I40E_GL_MDET_RX_FUNCTION_SHIFT; 1788 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 1789 I40E_GL_MDET_RX_EVENT_SHIFT; 1790 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 1791 I40E_GL_MDET_RX_QUEUE_SHIFT; 1792 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 1793 mdd_detected = true; 1794 } 1795 1796 if (!mdd_detected) 1797 return; 1798 1799 reg = rd32(hw, I40E_PF_MDET_RX); 1800 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 1801 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 1802 pf_mdet_num = hw->pf_id; 1803 pf_mdd_detected = true; 1804 } 1805 1806 /* Check if MDD was caused by a VF */ 1807 for (int i = 0; i < pf->num_vfs; i++) { 1808 vf = &(pf->vfs[i]); 1809 reg = rd32(hw, I40E_VP_MDET_RX(i)); 1810 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 1811 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 1812 vp_mdet_num = i; 1813 vf->num_mdd_events++; 1814 vf_mdd_detected = true; 1815 } 1816 } 1817 1818 /* Print out an error message */ 1819 if (vf_mdd_detected && pf_mdd_detected) 1820 device_printf(dev, 1821 "Malicious Driver Detection event %d" 1822 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n", 1823 event, queue, pf_num, pf_mdet_num, vp_mdet_num); 1824 else if (vf_mdd_detected && !pf_mdd_detected) 1825 device_printf(dev, 1826 "Malicious Driver Detection event %d" 1827 " on RX queue %d, pf number %d, (VF-%d)\n", 1828 event, queue, pf_num, vp_mdet_num); 1829 else if (!vf_mdd_detected && pf_mdd_detected) 1830 device_printf(dev, 1831 "Malicious Driver Detection event %d" 1832 " on RX queue %d, pf number %d (PF-%d)\n", 1833 event, queue, pf_num, pf_mdet_num); 1834 /* Theoretically shouldn't happen */ 1835 else 1836 device_printf(dev, 1837 "RX Malicious Driver Detection event (unknown)\n"); 1838 } 1839 1840 /** 1841 * ixl_handle_mdd_event 1842 * 1843 * Called from interrupt handler to identify possibly malicious vfs 1844 * (But also detects events from the PF, as well) 1845 **/ 1846 void 1847 ixl_handle_mdd_event(struct ixl_pf *pf) 1848 { 1849 struct i40e_hw *hw = &pf->hw; 1850 u32 reg; 1851 1852 /* 1853 * Handle both TX/RX because it's possible they could 1854 * both trigger in the same interrupt. 
 */
	ixl_handle_tx_mdd_event(pf);
	ixl_handle_rx_mdd_event(pf);

	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);

	/* re-enable mdd interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	ixl_flush(hw);
}

void
ixl_enable_intr0(struct i40e_hw *hw)
{
	u32 reg;

	/* Use IXL_ITR_NONE so ITR isn't updated here */
	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
}

void
ixl_disable_intr0(struct i40e_hw *hw)
{
	u32 reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
	ixl_flush(hw);
}

void
ixl_enable_queue(struct i40e_hw *hw, int id)
{
	u32 reg;

	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

void
ixl_disable_queue(struct i40e_hw *hw, int id)
{
	u32 reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);

	ixl_prepare_for_reset(pf, is_up);
	/*
	 * i40e_pf_reset checks the type of reset and acts
	 * accordingly. If an EMP or Core reset was performed,
	 * a PF reset is not necessary and can sometimes fail.
	 */
	ixl_pf_reset(pf);

	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		device_printf(pf->dev,
		    "Firmware recovery mode detected. Limiting functionality.
Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 1930 pf->link_up = FALSE; 1931 ixl_update_link_status(pf); 1932 } 1933 1934 ixl_rebuild_hw_structs_after_reset(pf, is_up); 1935 1936 atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING); 1937 } 1938 1939 void 1940 ixl_update_stats_counters(struct ixl_pf *pf) 1941 { 1942 struct i40e_hw *hw = &pf->hw; 1943 struct ixl_vsi *vsi = &pf->vsi; 1944 struct ixl_vf *vf; 1945 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx; 1946 1947 struct i40e_hw_port_stats *nsd = &pf->stats; 1948 struct i40e_hw_port_stats *osd = &pf->stats_offsets; 1949 1950 /* Update hw stats */ 1951 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), 1952 pf->stat_offsets_loaded, 1953 &osd->crc_errors, &nsd->crc_errors); 1954 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), 1955 pf->stat_offsets_loaded, 1956 &osd->illegal_bytes, &nsd->illegal_bytes); 1957 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), 1958 I40E_GLPRT_GORCL(hw->port), 1959 pf->stat_offsets_loaded, 1960 &osd->eth.rx_bytes, &nsd->eth.rx_bytes); 1961 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), 1962 I40E_GLPRT_GOTCL(hw->port), 1963 pf->stat_offsets_loaded, 1964 &osd->eth.tx_bytes, &nsd->eth.tx_bytes); 1965 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), 1966 pf->stat_offsets_loaded, 1967 &osd->eth.rx_discards, 1968 &nsd->eth.rx_discards); 1969 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), 1970 I40E_GLPRT_UPRCL(hw->port), 1971 pf->stat_offsets_loaded, 1972 &osd->eth.rx_unicast, 1973 &nsd->eth.rx_unicast); 1974 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), 1975 I40E_GLPRT_UPTCL(hw->port), 1976 pf->stat_offsets_loaded, 1977 &osd->eth.tx_unicast, 1978 &nsd->eth.tx_unicast); 1979 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), 1980 I40E_GLPRT_MPRCL(hw->port), 1981 pf->stat_offsets_loaded, 1982 &osd->eth.rx_multicast, 1983 &nsd->eth.rx_multicast); 1984 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), 1985 I40E_GLPRT_MPTCL(hw->port), 1986 pf->stat_offsets_loaded, 1987 &osd->eth.tx_multicast, 1988 &nsd->eth.tx_multicast); 1989 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), 1990 I40E_GLPRT_BPRCL(hw->port), 1991 pf->stat_offsets_loaded, 1992 &osd->eth.rx_broadcast, 1993 &nsd->eth.rx_broadcast); 1994 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), 1995 I40E_GLPRT_BPTCL(hw->port), 1996 pf->stat_offsets_loaded, 1997 &osd->eth.tx_broadcast, 1998 &nsd->eth.tx_broadcast); 1999 2000 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), 2001 pf->stat_offsets_loaded, 2002 &osd->tx_dropped_link_down, 2003 &nsd->tx_dropped_link_down); 2004 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), 2005 pf->stat_offsets_loaded, 2006 &osd->mac_local_faults, 2007 &nsd->mac_local_faults); 2008 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), 2009 pf->stat_offsets_loaded, 2010 &osd->mac_remote_faults, 2011 &nsd->mac_remote_faults); 2012 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), 2013 pf->stat_offsets_loaded, 2014 &osd->rx_length_errors, 2015 &nsd->rx_length_errors); 2016 2017 /* Flow control (LFC) stats */ 2018 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), 2019 pf->stat_offsets_loaded, 2020 &osd->link_xon_rx, &nsd->link_xon_rx); 2021 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), 2022 pf->stat_offsets_loaded, 2023 &osd->link_xon_tx, &nsd->link_xon_tx); 2024 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), 2025 pf->stat_offsets_loaded, 2026 &osd->link_xoff_rx, &nsd->link_xoff_rx); 2027 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), 2028 
pf->stat_offsets_loaded, 2029 &osd->link_xoff_tx, &nsd->link_xoff_tx); 2030 2031 /* 2032 * For watchdog management we need to know if we have been paused 2033 * during the last interval, so capture that here. 2034 */ 2035 if (pf->stats.link_xoff_rx != prev_link_xoff_rx) 2036 vsi->shared->isc_pause_frames = 1; 2037 2038 /* Packet size stats rx */ 2039 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 2040 I40E_GLPRT_PRC64L(hw->port), 2041 pf->stat_offsets_loaded, 2042 &osd->rx_size_64, &nsd->rx_size_64); 2043 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), 2044 I40E_GLPRT_PRC127L(hw->port), 2045 pf->stat_offsets_loaded, 2046 &osd->rx_size_127, &nsd->rx_size_127); 2047 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), 2048 I40E_GLPRT_PRC255L(hw->port), 2049 pf->stat_offsets_loaded, 2050 &osd->rx_size_255, &nsd->rx_size_255); 2051 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), 2052 I40E_GLPRT_PRC511L(hw->port), 2053 pf->stat_offsets_loaded, 2054 &osd->rx_size_511, &nsd->rx_size_511); 2055 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), 2056 I40E_GLPRT_PRC1023L(hw->port), 2057 pf->stat_offsets_loaded, 2058 &osd->rx_size_1023, &nsd->rx_size_1023); 2059 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), 2060 I40E_GLPRT_PRC1522L(hw->port), 2061 pf->stat_offsets_loaded, 2062 &osd->rx_size_1522, &nsd->rx_size_1522); 2063 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), 2064 I40E_GLPRT_PRC9522L(hw->port), 2065 pf->stat_offsets_loaded, 2066 &osd->rx_size_big, &nsd->rx_size_big); 2067 2068 /* Packet size stats tx */ 2069 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), 2070 I40E_GLPRT_PTC64L(hw->port), 2071 pf->stat_offsets_loaded, 2072 &osd->tx_size_64, &nsd->tx_size_64); 2073 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), 2074 I40E_GLPRT_PTC127L(hw->port), 2075 pf->stat_offsets_loaded, 2076 &osd->tx_size_127, &nsd->tx_size_127); 2077 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), 2078 I40E_GLPRT_PTC255L(hw->port), 2079 pf->stat_offsets_loaded, 2080 &osd->tx_size_255, &nsd->tx_size_255); 2081 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), 2082 I40E_GLPRT_PTC511L(hw->port), 2083 pf->stat_offsets_loaded, 2084 &osd->tx_size_511, &nsd->tx_size_511); 2085 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), 2086 I40E_GLPRT_PTC1023L(hw->port), 2087 pf->stat_offsets_loaded, 2088 &osd->tx_size_1023, &nsd->tx_size_1023); 2089 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), 2090 I40E_GLPRT_PTC1522L(hw->port), 2091 pf->stat_offsets_loaded, 2092 &osd->tx_size_1522, &nsd->tx_size_1522); 2093 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), 2094 I40E_GLPRT_PTC9522L(hw->port), 2095 pf->stat_offsets_loaded, 2096 &osd->tx_size_big, &nsd->tx_size_big); 2097 2098 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), 2099 pf->stat_offsets_loaded, 2100 &osd->rx_undersize, &nsd->rx_undersize); 2101 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), 2102 pf->stat_offsets_loaded, 2103 &osd->rx_fragments, &nsd->rx_fragments); 2104 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), 2105 pf->stat_offsets_loaded, 2106 &osd->rx_oversize, &nsd->rx_oversize); 2107 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 2108 pf->stat_offsets_loaded, 2109 &osd->rx_jabber, &nsd->rx_jabber); 2110 /* EEE */ 2111 i40e_get_phy_lpi_status(hw, nsd); 2112 2113 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded, 2114 &osd->tx_lpi_count, &nsd->tx_lpi_count, 2115 &osd->rx_lpi_count, &nsd->rx_lpi_count); 2116 2117 pf->stat_offsets_loaded = true; 2118 /* End hw stats */ 2119 2120 /* Update vsi stats */ 2121 
ixl_update_vsi_stats(vsi); 2122 2123 for (int i = 0; i < pf->num_vfs; i++) { 2124 vf = &pf->vfs[i]; 2125 if (vf->vf_flags & VF_FLAG_ENABLED) 2126 ixl_update_eth_stats(&pf->vfs[i].vsi); 2127 } 2128 } 2129 2130 /** 2131 * Update VSI-specific ethernet statistics counters. 2132 **/ 2133 void 2134 ixl_update_eth_stats(struct ixl_vsi *vsi) 2135 { 2136 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 2137 struct i40e_hw *hw = &pf->hw; 2138 struct i40e_eth_stats *es; 2139 struct i40e_eth_stats *oes; 2140 u16 stat_idx = vsi->info.stat_counter_idx; 2141 2142 es = &vsi->eth_stats; 2143 oes = &vsi->eth_stats_offsets; 2144 2145 /* Gather up the stats that the hw collects */ 2146 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), 2147 vsi->stat_offsets_loaded, 2148 &oes->tx_errors, &es->tx_errors); 2149 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 2150 vsi->stat_offsets_loaded, 2151 &oes->rx_discards, &es->rx_discards); 2152 2153 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), 2154 I40E_GLV_GORCL(stat_idx), 2155 vsi->stat_offsets_loaded, 2156 &oes->rx_bytes, &es->rx_bytes); 2157 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), 2158 I40E_GLV_UPRCL(stat_idx), 2159 vsi->stat_offsets_loaded, 2160 &oes->rx_unicast, &es->rx_unicast); 2161 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), 2162 I40E_GLV_MPRCL(stat_idx), 2163 vsi->stat_offsets_loaded, 2164 &oes->rx_multicast, &es->rx_multicast); 2165 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), 2166 I40E_GLV_BPRCL(stat_idx), 2167 vsi->stat_offsets_loaded, 2168 &oes->rx_broadcast, &es->rx_broadcast); 2169 2170 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), 2171 I40E_GLV_GOTCL(stat_idx), 2172 vsi->stat_offsets_loaded, 2173 &oes->tx_bytes, &es->tx_bytes); 2174 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), 2175 I40E_GLV_UPTCL(stat_idx), 2176 vsi->stat_offsets_loaded, 2177 &oes->tx_unicast, &es->tx_unicast); 2178 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), 2179 I40E_GLV_MPTCL(stat_idx), 2180 vsi->stat_offsets_loaded, 2181 &oes->tx_multicast, &es->tx_multicast); 2182 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), 2183 I40E_GLV_BPTCL(stat_idx), 2184 vsi->stat_offsets_loaded, 2185 &oes->tx_broadcast, &es->tx_broadcast); 2186 vsi->stat_offsets_loaded = true; 2187 } 2188 2189 void 2190 ixl_update_vsi_stats(struct ixl_vsi *vsi) 2191 { 2192 struct ixl_pf *pf; 2193 struct ifnet *ifp; 2194 struct i40e_eth_stats *es; 2195 u64 tx_discards, csum_errs; 2196 2197 struct i40e_hw_port_stats *nsd; 2198 2199 pf = vsi->back; 2200 ifp = vsi->ifp; 2201 es = &vsi->eth_stats; 2202 nsd = &pf->stats; 2203 2204 ixl_update_eth_stats(vsi); 2205 2206 tx_discards = es->tx_discards + nsd->tx_dropped_link_down; 2207 2208 csum_errs = 0; 2209 for (int i = 0; i < vsi->num_rx_queues; i++) 2210 csum_errs += vsi->rx_queues[i].rxr.csum_errs; 2211 nsd->checksum_error = csum_errs; 2212 2213 /* Update ifnet stats */ 2214 IXL_SET_IPACKETS(vsi, es->rx_unicast + 2215 es->rx_multicast + 2216 es->rx_broadcast); 2217 IXL_SET_OPACKETS(vsi, es->tx_unicast + 2218 es->tx_multicast + 2219 es->tx_broadcast); 2220 IXL_SET_IBYTES(vsi, es->rx_bytes); 2221 IXL_SET_OBYTES(vsi, es->tx_bytes); 2222 IXL_SET_IMCASTS(vsi, es->rx_multicast); 2223 IXL_SET_OMCASTS(vsi, es->tx_multicast); 2224 2225 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + 2226 nsd->checksum_error + nsd->rx_length_errors + 2227 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize + 2228 nsd->rx_jabber); 2229 IXL_SET_OERRORS(vsi, es->tx_errors); 2230 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); 2231 IXL_SET_OQDROPS(vsi, 
tx_discards); 2232 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); 2233 IXL_SET_COLLISIONS(vsi, 0); 2234 } 2235 2236 /** 2237 * Reset all of the stats for the given pf 2238 **/ 2239 void 2240 ixl_pf_reset_stats(struct ixl_pf *pf) 2241 { 2242 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); 2243 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); 2244 pf->stat_offsets_loaded = false; 2245 } 2246 2247 /** 2248 * Resets all stats of the given vsi 2249 **/ 2250 void 2251 ixl_vsi_reset_stats(struct ixl_vsi *vsi) 2252 { 2253 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); 2254 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); 2255 vsi->stat_offsets_loaded = false; 2256 } 2257 2258 /** 2259 * Read and update a 48 bit stat from the hw 2260 * 2261 * Since the device stats are not reset at PFReset, they likely will not 2262 * be zeroed when the driver starts. We'll save the first values read 2263 * and use them as offsets to be subtracted from the raw values in order 2264 * to report stats that count from zero. 2265 **/ 2266 void 2267 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, 2268 bool offset_loaded, u64 *offset, u64 *stat) 2269 { 2270 u64 new_data; 2271 2272 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) 2273 new_data = rd64(hw, loreg); 2274 #else 2275 /* 2276 * Use two rd32's instead of one rd64; FreeBSD versions before 2277 * 10 don't support 64-bit bus reads/writes. 2278 */ 2279 new_data = rd32(hw, loreg); 2280 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; 2281 #endif 2282 2283 if (!offset_loaded) 2284 *offset = new_data; 2285 if (new_data >= *offset) 2286 *stat = new_data - *offset; 2287 else 2288 *stat = (new_data + ((u64)1 << 48)) - *offset; 2289 *stat &= 0xFFFFFFFFFFFFULL; 2290 } 2291 2292 /** 2293 * Read and update a 32 bit stat from the hw 2294 **/ 2295 void 2296 ixl_stat_update32(struct i40e_hw *hw, u32 reg, 2297 bool offset_loaded, u64 *offset, u64 *stat) 2298 { 2299 u32 new_data; 2300 2301 new_data = rd32(hw, reg); 2302 if (!offset_loaded) 2303 *offset = new_data; 2304 if (new_data >= *offset) 2305 *stat = (u32)(new_data - *offset); 2306 else 2307 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); 2308 } 2309 2310 /** 2311 * Add subset of device sysctls safe to use in recovery mode 2312 */ 2313 void 2314 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf) 2315 { 2316 device_t dev = pf->dev; 2317 2318 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2319 struct sysctl_oid_list *ctx_list = 2320 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2321 2322 struct sysctl_oid *debug_node; 2323 struct sysctl_oid_list *debug_list; 2324 2325 SYSCTL_ADD_PROC(ctx, ctx_list, 2326 OID_AUTO, "fw_version", 2327 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2328 ixl_sysctl_show_fw, "A", "Firmware version"); 2329 2330 /* Add sysctls meant to print debug information, but don't list them 2331 * in "sysctl -a" output. 
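 * (CTLFLAG_SKIP only hides the "debug" node from a full listing; each
 * OID is still reachable by explicit name, e.g.
 * "sysctl dev.ixl.0.debug.dump_debug_data", where unit 0 is just an
 * example.)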
*/ 2332 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2333 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2334 "Debug Sysctls"); 2335 debug_list = SYSCTL_CHILDREN(debug_node); 2336 2337 SYSCTL_ADD_UINT(ctx, debug_list, 2338 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2339 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2340 2341 SYSCTL_ADD_UINT(ctx, debug_list, 2342 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2343 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2344 2345 SYSCTL_ADD_PROC(ctx, debug_list, 2346 OID_AUTO, "dump_debug_data", 2347 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2348 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2349 2350 SYSCTL_ADD_PROC(ctx, debug_list, 2351 OID_AUTO, "do_pf_reset", 2352 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2353 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2354 2355 SYSCTL_ADD_PROC(ctx, debug_list, 2356 OID_AUTO, "do_core_reset", 2357 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2358 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2359 2360 SYSCTL_ADD_PROC(ctx, debug_list, 2361 OID_AUTO, "do_global_reset", 2362 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2363 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2364 2365 SYSCTL_ADD_PROC(ctx, debug_list, 2366 OID_AUTO, "queue_interrupt_table", 2367 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2368 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2369 } 2370 2371 void 2372 ixl_add_device_sysctls(struct ixl_pf *pf) 2373 { 2374 device_t dev = pf->dev; 2375 struct i40e_hw *hw = &pf->hw; 2376 2377 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2378 struct sysctl_oid_list *ctx_list = 2379 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2380 2381 struct sysctl_oid *debug_node; 2382 struct sysctl_oid_list *debug_list; 2383 2384 struct sysctl_oid *fec_node; 2385 struct sysctl_oid_list *fec_list; 2386 struct sysctl_oid *eee_node; 2387 struct sysctl_oid_list *eee_list; 2388 2389 /* Set up sysctls */ 2390 SYSCTL_ADD_PROC(ctx, ctx_list, 2391 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2392 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 2393 2394 SYSCTL_ADD_PROC(ctx, ctx_list, 2395 OID_AUTO, "advertise_speed", 2396 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2397 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); 2398 2399 SYSCTL_ADD_PROC(ctx, ctx_list, 2400 OID_AUTO, "supported_speeds", 2401 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2402 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); 2403 2404 SYSCTL_ADD_PROC(ctx, ctx_list, 2405 OID_AUTO, "current_speed", 2406 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2407 ixl_sysctl_current_speed, "A", "Current Port Speed"); 2408 2409 SYSCTL_ADD_PROC(ctx, ctx_list, 2410 OID_AUTO, "fw_version", 2411 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2412 ixl_sysctl_show_fw, "A", "Firmware version"); 2413 2414 SYSCTL_ADD_PROC(ctx, ctx_list, 2415 OID_AUTO, "unallocated_queues", 2416 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2417 ixl_sysctl_unallocated_queues, "I", 2418 "Queues not allocated to a PF or VF"); 2419 2420 SYSCTL_ADD_PROC(ctx, ctx_list, 2421 OID_AUTO, "tx_itr", 2422 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2423 ixl_sysctl_pf_tx_itr, "I", 2424 "Immediately set TX ITR value for all queues"); 2425 2426 SYSCTL_ADD_PROC(ctx, ctx_list, 2427 OID_AUTO, 
"rx_itr", 2428 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2429 ixl_sysctl_pf_rx_itr, "I", 2430 "Immediately set RX ITR value for all queues"); 2431 2432 SYSCTL_ADD_INT(ctx, ctx_list, 2433 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 2434 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 2435 2436 SYSCTL_ADD_INT(ctx, ctx_list, 2437 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 2438 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 2439 2440 /* Add FEC sysctls for 25G adapters */ 2441 if (i40e_is_25G_device(hw->device_id)) { 2442 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2443 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2444 "FEC Sysctls"); 2445 fec_list = SYSCTL_CHILDREN(fec_node); 2446 2447 SYSCTL_ADD_PROC(ctx, fec_list, 2448 OID_AUTO, "fc_ability", 2449 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2450 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 2451 2452 SYSCTL_ADD_PROC(ctx, fec_list, 2453 OID_AUTO, "rs_ability", 2454 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2455 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 2456 2457 SYSCTL_ADD_PROC(ctx, fec_list, 2458 OID_AUTO, "fc_requested", 2459 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2460 ixl_sysctl_fec_fc_request, "I", 2461 "FC FEC mode requested on link"); 2462 2463 SYSCTL_ADD_PROC(ctx, fec_list, 2464 OID_AUTO, "rs_requested", 2465 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2466 ixl_sysctl_fec_rs_request, "I", 2467 "RS FEC mode requested on link"); 2468 2469 SYSCTL_ADD_PROC(ctx, fec_list, 2470 OID_AUTO, "auto_fec_enabled", 2471 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2472 ixl_sysctl_fec_auto_enable, "I", 2473 "Let FW decide FEC ability/request modes"); 2474 } 2475 2476 SYSCTL_ADD_PROC(ctx, ctx_list, 2477 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2478 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 2479 2480 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2481 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2482 "Energy Efficient Ethernet (EEE) Sysctls"); 2483 eee_list = SYSCTL_CHILDREN(eee_node); 2484 2485 SYSCTL_ADD_PROC(ctx, eee_list, 2486 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2487 pf, 0, ixl_sysctl_eee_enable, "I", 2488 "Enable Energy Efficient Ethernet (EEE)"); 2489 2490 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", 2491 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0, 2492 "TX LPI status"); 2493 2494 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", 2495 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0, 2496 "RX LPI status"); 2497 2498 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", 2499 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count, 2500 "TX LPI count"); 2501 2502 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", 2503 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count, 2504 "RX LPI count"); 2505 2506 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, 2507 "link_active_on_if_down", 2508 CTLTYPE_INT | CTLFLAG_RWTUN, 2509 pf, 0, ixl_sysctl_set_link_active, "I", 2510 IXL_SYSCTL_HELP_SET_LINK_ACTIVE); 2511 2512 /* Add sysctls meant to print debug information, but don't list them 2513 * in "sysctl -a" output. 
*/ 2514 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2515 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2516 "Debug Sysctls"); 2517 debug_list = SYSCTL_CHILDREN(debug_node); 2518 2519 SYSCTL_ADD_UINT(ctx, debug_list, 2520 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2521 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2522 2523 SYSCTL_ADD_UINT(ctx, debug_list, 2524 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2525 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2526 2527 SYSCTL_ADD_PROC(ctx, debug_list, 2528 OID_AUTO, "link_status", 2529 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2530 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); 2531 2532 SYSCTL_ADD_PROC(ctx, debug_list, 2533 OID_AUTO, "phy_abilities_init", 2534 CTLTYPE_STRING | CTLFLAG_RD, 2535 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities"); 2536 2537 SYSCTL_ADD_PROC(ctx, debug_list, 2538 OID_AUTO, "phy_abilities", 2539 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2540 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); 2541 2542 SYSCTL_ADD_PROC(ctx, debug_list, 2543 OID_AUTO, "filter_list", 2544 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2545 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); 2546 2547 SYSCTL_ADD_PROC(ctx, debug_list, 2548 OID_AUTO, "hw_res_alloc", 2549 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2550 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); 2551 2552 SYSCTL_ADD_PROC(ctx, debug_list, 2553 OID_AUTO, "switch_config", 2554 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2555 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); 2556 2557 SYSCTL_ADD_PROC(ctx, debug_list, 2558 OID_AUTO, "switch_vlans", 2559 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2560 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration"); 2561 2562 SYSCTL_ADD_PROC(ctx, debug_list, 2563 OID_AUTO, "rss_key", 2564 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2565 pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); 2566 2567 SYSCTL_ADD_PROC(ctx, debug_list, 2568 OID_AUTO, "rss_lut", 2569 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2570 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); 2571 2572 SYSCTL_ADD_PROC(ctx, debug_list, 2573 OID_AUTO, "rss_hena", 2574 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2575 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); 2576 2577 SYSCTL_ADD_PROC(ctx, debug_list, 2578 OID_AUTO, "disable_fw_link_management", 2579 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2580 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); 2581 2582 SYSCTL_ADD_PROC(ctx, debug_list, 2583 OID_AUTO, "dump_debug_data", 2584 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2585 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2586 2587 SYSCTL_ADD_PROC(ctx, debug_list, 2588 OID_AUTO, "do_pf_reset", 2589 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2590 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2591 2592 SYSCTL_ADD_PROC(ctx, debug_list, 2593 OID_AUTO, "do_core_reset", 2594 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2595 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2596 2597 SYSCTL_ADD_PROC(ctx, debug_list, 2598 OID_AUTO, "do_global_reset", 2599 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2600 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2601 2602 SYSCTL_ADD_PROC(ctx, debug_list, 2603 OID_AUTO, "queue_interrupt_table", 2604 
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2605 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2606 2607 if (pf->has_i2c) { 2608 SYSCTL_ADD_PROC(ctx, debug_list, 2609 OID_AUTO, "read_i2c_byte", 2610 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2611 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); 2612 2613 SYSCTL_ADD_PROC(ctx, debug_list, 2614 OID_AUTO, "write_i2c_byte", 2615 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2616 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); 2617 2618 SYSCTL_ADD_PROC(ctx, debug_list, 2619 OID_AUTO, "read_i2c_diag_data", 2620 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2621 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); 2622 } 2623 } 2624 2625 /* 2626 * Primarily for finding out how many queues can be assigned to VFs, 2627 * at runtime. 2628 */ 2629 static int 2630 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) 2631 { 2632 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2633 int queues; 2634 2635 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); 2636 2637 return sysctl_handle_int(oidp, NULL, queues, req); 2638 } 2639 2640 static const char * 2641 ixl_link_speed_string(enum i40e_aq_link_speed link_speed) 2642 { 2643 const char * link_speed_str[] = { 2644 "Unknown", 2645 "100 Mbps", 2646 "1 Gbps", 2647 "10 Gbps", 2648 "40 Gbps", 2649 "20 Gbps", 2650 "25 Gbps", 2651 "2.5 Gbps", 2652 "5 Gbps" 2653 }; 2654 int index; 2655 2656 switch (link_speed) { 2657 case I40E_LINK_SPEED_100MB: 2658 index = 1; 2659 break; 2660 case I40E_LINK_SPEED_1GB: 2661 index = 2; 2662 break; 2663 case I40E_LINK_SPEED_10GB: 2664 index = 3; 2665 break; 2666 case I40E_LINK_SPEED_40GB: 2667 index = 4; 2668 break; 2669 case I40E_LINK_SPEED_20GB: 2670 index = 5; 2671 break; 2672 case I40E_LINK_SPEED_25GB: 2673 index = 6; 2674 break; 2675 case I40E_LINK_SPEED_2_5GB: 2676 index = 7; 2677 break; 2678 case I40E_LINK_SPEED_5GB: 2679 index = 8; 2680 break; 2681 case I40E_LINK_SPEED_UNKNOWN: 2682 default: 2683 index = 0; 2684 break; 2685 } 2686 2687 return (link_speed_str[index]); 2688 } 2689 2690 int 2691 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2692 { 2693 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2694 struct i40e_hw *hw = &pf->hw; 2695 int error = 0; 2696 2697 ixl_update_link_status(pf); 2698 2699 error = sysctl_handle_string(oidp, 2700 __DECONST(void *, 2701 ixl_link_speed_string(hw->phy.link_info.link_speed)), 2702 8, req); 2703 2704 return (error); 2705 } 2706 2707 /* 2708 * Converts 8-bit speeds value to and from sysctl flags and 2709 * Admin Queue flags. 2710 */ 2711 static u8 2712 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) 2713 { 2714 #define SPEED_MAP_SIZE 8 2715 static u16 speedmap[SPEED_MAP_SIZE] = { 2716 (I40E_LINK_SPEED_100MB | (0x1 << 8)), 2717 (I40E_LINK_SPEED_1GB | (0x2 << 8)), 2718 (I40E_LINK_SPEED_10GB | (0x4 << 8)), 2719 (I40E_LINK_SPEED_20GB | (0x8 << 8)), 2720 (I40E_LINK_SPEED_25GB | (0x10 << 8)), 2721 (I40E_LINK_SPEED_40GB | (0x20 << 8)), 2722 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)), 2723 (I40E_LINK_SPEED_5GB | (0x80 << 8)), 2724 }; 2725 u8 retval = 0; 2726 2727 for (int i = 0; i < SPEED_MAP_SIZE; i++) { 2728 if (to_aq) 2729 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; 2730 else 2731 retval |= (speeds & speedmap[i]) ? 
(speedmap[i] >> 8) : 0; 2732 } 2733 2734 return (retval); 2735 } 2736 2737 int 2738 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) 2739 { 2740 struct i40e_hw *hw = &pf->hw; 2741 device_t dev = pf->dev; 2742 struct i40e_aq_get_phy_abilities_resp abilities; 2743 struct i40e_aq_set_phy_config config; 2744 enum i40e_status_code aq_error = 0; 2745 2746 /* Get current capability information */ 2747 aq_error = i40e_aq_get_phy_capabilities(hw, 2748 FALSE, FALSE, &abilities, NULL); 2749 if (aq_error) { 2750 device_printf(dev, 2751 "%s: Error getting phy capabilities %d," 2752 " aq error: %d\n", __func__, aq_error, 2753 hw->aq.asq_last_status); 2754 return (EIO); 2755 } 2756 2757 /* Prepare new config */ 2758 bzero(&config, sizeof(config)); 2759 if (from_aq) 2760 config.link_speed = speeds; 2761 else 2762 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); 2763 config.phy_type = abilities.phy_type; 2764 config.phy_type_ext = abilities.phy_type_ext; 2765 config.abilities = abilities.abilities 2766 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 2767 config.eee_capability = abilities.eee_capability; 2768 config.eeer = abilities.eeer_val; 2769 config.low_power_ctrl = abilities.d3_lpan; 2770 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 2771 & I40E_AQ_PHY_FEC_CONFIG_MASK; 2772 2773 /* Do aq command & restart link */ 2774 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 2775 if (aq_error) { 2776 device_printf(dev, 2777 "%s: Error setting new phy config %d," 2778 " aq error: %d\n", __func__, aq_error, 2779 hw->aq.asq_last_status); 2780 return (EIO); 2781 } 2782 2783 return (0); 2784 } 2785 2786 /* 2787 ** Supported link speeds 2788 ** Flags: 2789 ** 0x1 - 100 Mb 2790 ** 0x2 - 1G 2791 ** 0x4 - 10G 2792 ** 0x8 - 20G 2793 ** 0x10 - 25G 2794 ** 0x20 - 40G 2795 ** 0x40 - 2.5G 2796 ** 0x80 - 5G 2797 */ 2798 static int 2799 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) 2800 { 2801 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2802 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 2803 2804 return sysctl_handle_int(oidp, NULL, supported, req); 2805 } 2806 2807 /* 2808 ** Control link advertise speed: 2809 ** Flags: 2810 ** 0x1 - advertise 100 Mb 2811 ** 0x2 - advertise 1G 2812 ** 0x4 - advertise 10G 2813 ** 0x8 - advertise 20G 2814 ** 0x10 - advertise 25G 2815 ** 0x20 - advertise 40G 2816 ** 0x40 - advertise 2.5G 2817 ** 0x80 - advertise 5G 2818 ** 2819 ** Set to 0 to disable link 2820 */ 2821 int 2822 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) 2823 { 2824 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2825 device_t dev = pf->dev; 2826 u8 converted_speeds; 2827 int requested_ls = 0; 2828 int error = 0; 2829 2830 /* Read in new mode */ 2831 requested_ls = pf->advertised_speed; 2832 error = sysctl_handle_int(oidp, &requested_ls, 0, req); 2833 if ((error) || (req->newptr == NULL)) 2834 return (error); 2835 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 2836 device_printf(dev, "Interface is currently in FW recovery mode. 
" 2837 "Setting advertise speed not supported\n"); 2838 return (EINVAL); 2839 } 2840 2841 /* Error out if bits outside of possible flag range are set */ 2842 if ((requested_ls & ~((u8)0xFF)) != 0) { 2843 device_printf(dev, "Input advertised speed out of range; " 2844 "valid flags are: 0x%02x\n", 2845 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2846 return (EINVAL); 2847 } 2848 2849 /* Check if adapter supports input value */ 2850 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); 2851 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { 2852 device_printf(dev, "Invalid advertised speed; " 2853 "valid flags are: 0x%02x\n", 2854 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2855 return (EINVAL); 2856 } 2857 2858 error = ixl_set_advertised_speeds(pf, requested_ls, false); 2859 if (error) 2860 return (error); 2861 2862 pf->advertised_speed = requested_ls; 2863 ixl_update_link_status(pf); 2864 return (0); 2865 } 2866 2867 /* 2868 * Input: bitmap of enum i40e_aq_link_speed 2869 */ 2870 u64 2871 ixl_max_aq_speed_to_value(u8 link_speeds) 2872 { 2873 if (link_speeds & I40E_LINK_SPEED_40GB) 2874 return IF_Gbps(40); 2875 if (link_speeds & I40E_LINK_SPEED_25GB) 2876 return IF_Gbps(25); 2877 if (link_speeds & I40E_LINK_SPEED_20GB) 2878 return IF_Gbps(20); 2879 if (link_speeds & I40E_LINK_SPEED_10GB) 2880 return IF_Gbps(10); 2881 if (link_speeds & I40E_LINK_SPEED_5GB) 2882 return IF_Gbps(5); 2883 if (link_speeds & I40E_LINK_SPEED_2_5GB) 2884 return IF_Mbps(2500); 2885 if (link_speeds & I40E_LINK_SPEED_1GB) 2886 return IF_Gbps(1); 2887 if (link_speeds & I40E_LINK_SPEED_100MB) 2888 return IF_Mbps(100); 2889 else 2890 /* Minimum supported link speed */ 2891 return IF_Mbps(100); 2892 } 2893 2894 /* 2895 ** Get the width and transaction speed of 2896 ** the bus this adapter is plugged into. 2897 */ 2898 void 2899 ixl_get_bus_info(struct ixl_pf *pf) 2900 { 2901 struct i40e_hw *hw = &pf->hw; 2902 device_t dev = pf->dev; 2903 u16 link; 2904 u32 offset, num_ports; 2905 u64 max_speed; 2906 2907 /* Some devices don't use PCIE */ 2908 if (hw->mac.type == I40E_MAC_X722) 2909 return; 2910 2911 /* Read PCI Express Capabilities Link Status Register */ 2912 pci_find_cap(dev, PCIY_EXPRESS, &offset); 2913 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2914 2915 /* Fill out hw struct with PCIE info */ 2916 i40e_set_pci_config_data(hw, link); 2917 2918 /* Use info to print out bandwidth messages */ 2919 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 2920 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 2921 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 2922 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 2923 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 2924 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 2925 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 2926 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 2927 ("Unknown")); 2928 2929 /* 2930 * If adapter is in slot with maximum supported speed, 2931 * no warning message needs to be printed out. 
2932 */ 2933 if (hw->bus.speed >= i40e_bus_speed_8000 2934 && hw->bus.width >= i40e_bus_width_pcie_x8) 2935 return; 2936 2937 num_ports = bitcount32(hw->func_caps.valid_functions); 2938 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; 2939 2940 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { 2941 device_printf(dev, "PCI-Express bandwidth available" 2942 " for this device may be insufficient for" 2943 " optimal performance.\n"); 2944 device_printf(dev, "Please move the device to a different" 2945 " PCI-e link with more lanes and/or higher" 2946 " transfer rate.\n"); 2947 } 2948 } 2949 2950 static int 2951 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2952 { 2953 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2954 struct i40e_hw *hw = &pf->hw; 2955 struct sbuf *sbuf; 2956 2957 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2958 ixl_nvm_version_str(hw, sbuf); 2959 sbuf_finish(sbuf); 2960 sbuf_delete(sbuf); 2961 2962 return (0); 2963 } 2964 2965 void 2966 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) 2967 { 2968 u8 nvma_ptr = nvma->config & 0xFF; 2969 u8 nvma_flags = (nvma->config & 0xF00) >> 8; 2970 const char * cmd_str; 2971 2972 switch (nvma->command) { 2973 case I40E_NVM_READ: 2974 if (nvma_ptr == 0xF && nvma_flags == 0xF && 2975 nvma->offset == 0 && nvma->data_size == 1) { 2976 device_printf(dev, "NVMUPD: Get Driver Status Command\n"); 2977 return; 2978 } 2979 cmd_str = "READ "; 2980 break; 2981 case I40E_NVM_WRITE: 2982 cmd_str = "WRITE"; 2983 break; 2984 default: 2985 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command); 2986 return; 2987 } 2988 device_printf(dev, 2989 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n", 2990 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size); 2991 } 2992 2993 int 2994 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) 2995 { 2996 struct i40e_hw *hw = &pf->hw; 2997 struct i40e_nvm_access *nvma; 2998 device_t dev = pf->dev; 2999 enum i40e_status_code status = 0; 3000 size_t nvma_size, ifd_len, exp_len; 3001 int err, perrno; 3002 3003 DEBUGFUNC("ixl_handle_nvmupd_cmd"); 3004 3005 /* Sanity checks */ 3006 nvma_size = sizeof(struct i40e_nvm_access); 3007 ifd_len = ifd->ifd_len; 3008 3009 if (ifd_len < nvma_size || 3010 ifd->ifd_data == NULL) { 3011 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", 3012 __func__); 3013 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", 3014 __func__, ifd_len, nvma_size); 3015 device_printf(dev, "%s: data pointer: %p\n", __func__, 3016 ifd->ifd_data); 3017 return (EINVAL); 3018 } 3019 3020 nvma = malloc(ifd_len, M_IXL, M_WAITOK); 3021 err = copyin(ifd->ifd_data, nvma, ifd_len); 3022 if (err) { 3023 device_printf(dev, "%s: Cannot get request from user space\n", 3024 __func__); 3025 free(nvma, M_IXL); 3026 return (err); 3027 } 3028 3029 if (pf->dbg_mask & IXL_DBG_NVMUPD) 3030 ixl_print_nvm_cmd(dev, nvma); 3031 3032 if (IXL_PF_IS_RESETTING(pf)) { 3033 int count = 0; 3034 while (count++ < 100) { 3035 i40e_msec_delay(100); 3036 if (!(IXL_PF_IS_RESETTING(pf))) 3037 break; 3038 } 3039 } 3040 3041 if (IXL_PF_IS_RESETTING(pf)) { 3042 device_printf(dev, 3043 "%s: timeout waiting for EMP reset to finish\n", 3044 __func__); 3045 free(nvma, M_IXL); 3046 return (-EBUSY); 3047 } 3048 3049 if (nvma->data_size < 1 || nvma->data_size > 4096) { 3050 device_printf(dev, 3051 "%s: invalid request, data size not in supported range\n", 3052 __func__); 3053 free(nvma, M_IXL); 3054 return 
(EINVAL); 3055 } 3056 3057 /* 3058 * Older versions of the NVM update tool don't set ifd_len to the size 3059 * of the entire buffer passed to the ioctl. Check the data_size field 3060 * in the contained i40e_nvm_access struct and ensure everything is 3061 * copied in from userspace. 3062 */ 3063 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ 3064 3065 if (ifd_len < exp_len) { 3066 ifd_len = exp_len; 3067 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK); 3068 err = copyin(ifd->ifd_data, nvma, ifd_len); 3069 if (err) { 3070 device_printf(dev, "%s: Cannot get request from user space\n", 3071 __func__); 3072 free(nvma, M_IXL); 3073 return (err); 3074 } 3075 } 3076 3077 // TODO: Might need a different lock here 3078 // IXL_PF_LOCK(pf); 3079 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); 3080 // IXL_PF_UNLOCK(pf); 3081 3082 err = copyout(nvma, ifd->ifd_data, ifd_len); 3083 free(nvma, M_IXL); 3084 if (err) { 3085 device_printf(dev, "%s: Cannot return data to user space\n", 3086 __func__); 3087 return (err); 3088 } 3089 3090 /* Let the nvmupdate report errors, show them only when debug is enabled */ 3091 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) 3092 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", 3093 i40e_stat_str(hw, status), perrno); 3094 3095 /* 3096 * -EPERM is actually ERESTART, which the kernel interprets as it needing 3097 * to run this ioctl again. So use -EACCES for -EPERM instead. 3098 */ 3099 if (perrno == -EPERM) 3100 return (-EACCES); 3101 else 3102 return (perrno); 3103 } 3104 3105 int 3106 ixl_find_i2c_interface(struct ixl_pf *pf) 3107 { 3108 struct i40e_hw *hw = &pf->hw; 3109 bool i2c_en, port_matched; 3110 u32 reg; 3111 3112 for (int i = 0; i < 4; i++) { 3113 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); 3114 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); 3115 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) 3116 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) 3117 & BIT(hw->port); 3118 if (i2c_en && port_matched) 3119 return (i); 3120 } 3121 3122 return (-1); 3123 } 3124 3125 void 3126 ixl_set_link(struct ixl_pf *pf, bool enable) 3127 { 3128 struct i40e_hw *hw = &pf->hw; 3129 device_t dev = pf->dev; 3130 struct i40e_aq_get_phy_abilities_resp abilities; 3131 struct i40e_aq_set_phy_config config; 3132 enum i40e_status_code aq_error = 0; 3133 u32 phy_type, phy_type_ext; 3134 3135 /* Get initial capability information */ 3136 aq_error = i40e_aq_get_phy_capabilities(hw, 3137 FALSE, TRUE, &abilities, NULL); 3138 if (aq_error) { 3139 device_printf(dev, 3140 "%s: Error getting phy capabilities %d," 3141 " aq error: %d\n", __func__, aq_error, 3142 hw->aq.asq_last_status); 3143 return; 3144 } 3145 3146 phy_type = abilities.phy_type; 3147 phy_type_ext = abilities.phy_type_ext; 3148 3149 /* Get current capability information */ 3150 aq_error = i40e_aq_get_phy_capabilities(hw, 3151 FALSE, FALSE, &abilities, NULL); 3152 if (aq_error) { 3153 device_printf(dev, 3154 "%s: Error getting phy capabilities %d," 3155 " aq error: %d\n", __func__, aq_error, 3156 hw->aq.asq_last_status); 3157 return; 3158 } 3159 3160 /* Prepare new config */ 3161 memset(&config, 0, sizeof(config)); 3162 config.link_speed = abilities.link_speed; 3163 config.abilities = abilities.abilities; 3164 config.eee_capability = abilities.eee_capability; 3165 config.eeer = abilities.eeer_val; 3166 config.low_power_ctrl = abilities.d3_lpan; 3167 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 3168 & 
I40E_AQ_PHY_FEC_CONFIG_MASK;
	config.phy_type = 0;
	config.phy_type_ext = 0;

	config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
	    I40E_AQ_PHY_FLAG_PAUSE_RX);

	switch (pf->fc) {
	case I40E_FC_FULL:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
		    I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	if (enable) {
		config.phy_type = phy_type;
		config.phy_type_ext = phy_type_ext;
	}

	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}

	aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting link config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}
}

static char *
ixl_phy_type_string(u32 bit_pos, bool ext)
{
	static char * phy_types_str[32] = {
		"SGMII",
		"1000BASE-KX",
		"10GBASE-KX4",
		"10GBASE-KR",
		"40GBASE-KR4",
		"XAUI",
		"XFI",
		"SFI",
		"XLAUI",
		"XLPPI",
		"40GBASE-CR4",
		"10GBASE-CR1",
		"SFP+ Active DA",
		"QSFP+ Active DA",
		"Reserved (14)",
		"Reserved (15)",
		"Reserved (16)",
		"100BASE-TX",
		"1000BASE-T",
		"10GBASE-T",
		"10GBASE-SR",
		"10GBASE-LR",
		"10GBASE-SFP+Cu",
		"10GBASE-CR1",
		"40GBASE-CR4",
		"40GBASE-SR4",
		"40GBASE-LR4",
		"1000BASE-SX",
		"1000BASE-LX",
		"1000BASE-T Optical",
		"20GBASE-KR2",
		"Reserved (31)"
	};
	static char * ext_phy_types_str[8] = {
		"25GBASE-KR",
		"25GBASE-CR",
		"25GBASE-SR",
		"25GBASE-LR",
		"25GBASE-AOC",
		"25GBASE-ACC",
		"2.5GBASE-T",
		"5GBASE-T"
	};

	if (ext && bit_pos > 7) return "Invalid_Ext";
	if (bit_pos > 31) return "Invalid";

	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
}

/* TODO: ERJ: I don't think this is necessary anymore.
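 * (The shared code's i40e_aq_get_link_info() fills hw->phy.link_info
 * with much of the same information.)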
*/ 3270 int 3271 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) 3272 { 3273 device_t dev = pf->dev; 3274 struct i40e_hw *hw = &pf->hw; 3275 struct i40e_aq_desc desc; 3276 enum i40e_status_code status; 3277 3278 struct i40e_aqc_get_link_status *aq_link_status = 3279 (struct i40e_aqc_get_link_status *)&desc.params.raw; 3280 3281 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 3282 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); 3283 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 3284 if (status) { 3285 device_printf(dev, 3286 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", 3287 __func__, i40e_stat_str(hw, status), 3288 i40e_aq_str(hw, hw->aq.asq_last_status)); 3289 return (EIO); 3290 } 3291 3292 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); 3293 return (0); 3294 } 3295 3296 static char * 3297 ixl_phy_type_string_ls(u8 val) 3298 { 3299 if (val >= 0x1F) 3300 return ixl_phy_type_string(val - 0x1F, true); 3301 else 3302 return ixl_phy_type_string(val, false); 3303 } 3304 3305 static int 3306 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) 3307 { 3308 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3309 device_t dev = pf->dev; 3310 struct sbuf *buf; 3311 int error = 0; 3312 3313 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3314 if (!buf) { 3315 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3316 return (ENOMEM); 3317 } 3318 3319 struct i40e_aqc_get_link_status link_status; 3320 error = ixl_aq_get_link_status(pf, &link_status); 3321 if (error) { 3322 sbuf_delete(buf); 3323 return (error); 3324 } 3325 3326 sbuf_printf(buf, "\n" 3327 "PHY Type : 0x%02x<%s>\n" 3328 "Speed : 0x%02x\n" 3329 "Link info: 0x%02x\n" 3330 "AN info : 0x%02x\n" 3331 "Ext info : 0x%02x\n" 3332 "Loopback : 0x%02x\n" 3333 "Max Frame: %d\n" 3334 "Config : 0x%02x\n" 3335 "Power : 0x%02x", 3336 link_status.phy_type, 3337 ixl_phy_type_string_ls(link_status.phy_type), 3338 link_status.link_speed, 3339 link_status.link_info, 3340 link_status.an_info, 3341 link_status.ext_info, 3342 link_status.loopback, 3343 link_status.max_frame_size, 3344 link_status.config, 3345 link_status.power_desc); 3346 3347 error = sbuf_finish(buf); 3348 if (error) 3349 device_printf(dev, "Error finishing sbuf: %d\n", error); 3350 3351 sbuf_delete(buf); 3352 return (error); 3353 } 3354 3355 static int 3356 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 3357 { 3358 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3359 struct i40e_hw *hw = &pf->hw; 3360 device_t dev = pf->dev; 3361 enum i40e_status_code status; 3362 struct i40e_aq_get_phy_abilities_resp abilities; 3363 struct sbuf *buf; 3364 int error = 0; 3365 3366 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3367 if (!buf) { 3368 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3369 return (ENOMEM); 3370 } 3371 3372 status = i40e_aq_get_phy_capabilities(hw, 3373 FALSE, arg2 != 0, &abilities, NULL); 3374 if (status) { 3375 device_printf(dev, 3376 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 3377 __func__, i40e_stat_str(hw, status), 3378 i40e_aq_str(hw, hw->aq.asq_last_status)); 3379 sbuf_delete(buf); 3380 return (EIO); 3381 } 3382 3383 sbuf_printf(buf, "\n" 3384 "PHY Type : %08x", 3385 abilities.phy_type); 3386 3387 if (abilities.phy_type != 0) { 3388 sbuf_printf(buf, "<"); 3389 for (int i = 0; i < 32; i++) 3390 if ((1 << i) & abilities.phy_type) 3391 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); 3392 sbuf_printf(buf, 
">"); 3393 } 3394 3395 sbuf_printf(buf, "\nPHY Ext : %02x", 3396 abilities.phy_type_ext); 3397 3398 if (abilities.phy_type_ext != 0) { 3399 sbuf_printf(buf, "<"); 3400 for (int i = 0; i < 4; i++) 3401 if ((1 << i) & abilities.phy_type_ext) 3402 sbuf_printf(buf, "%s,", 3403 ixl_phy_type_string(i, true)); 3404 sbuf_printf(buf, ">"); 3405 } 3406 3407 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 3408 if (abilities.link_speed != 0) { 3409 u8 link_speed; 3410 sbuf_printf(buf, " <"); 3411 for (int i = 0; i < 8; i++) { 3412 link_speed = (1 << i) & abilities.link_speed; 3413 if (link_speed) 3414 sbuf_printf(buf, "%s, ", 3415 ixl_link_speed_string(link_speed)); 3416 } 3417 sbuf_printf(buf, ">"); 3418 } 3419 3420 sbuf_printf(buf, "\n" 3421 "Abilities: %02x\n" 3422 "EEE cap : %04x\n" 3423 "EEER reg : %08x\n" 3424 "D3 Lpan : %02x\n" 3425 "ID : %02x %02x %02x %02x\n" 3426 "ModType : %02x %02x %02x\n" 3427 "ModType E: %01x\n" 3428 "FEC Cfg : %02x\n" 3429 "Ext CC : %02x", 3430 abilities.abilities, abilities.eee_capability, 3431 abilities.eeer_val, abilities.d3_lpan, 3432 abilities.phy_id[0], abilities.phy_id[1], 3433 abilities.phy_id[2], abilities.phy_id[3], 3434 abilities.module_type[0], abilities.module_type[1], 3435 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 3436 abilities.fec_cfg_curr_mod_ext_info & 0x1F, 3437 abilities.ext_comp_code); 3438 3439 error = sbuf_finish(buf); 3440 if (error) 3441 device_printf(dev, "Error finishing sbuf: %d\n", error); 3442 3443 sbuf_delete(buf); 3444 return (error); 3445 } 3446 3447 static int 3448 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 3449 { 3450 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3451 struct ixl_vsi *vsi = &pf->vsi; 3452 struct ixl_mac_filter *f; 3453 device_t dev = pf->dev; 3454 int error = 0, ftl_len = 0, ftl_counter = 0; 3455 3456 struct sbuf *buf; 3457 3458 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3459 if (!buf) { 3460 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3461 return (ENOMEM); 3462 } 3463 3464 sbuf_printf(buf, "\n"); 3465 3466 /* Print MAC filters */ 3467 sbuf_printf(buf, "PF Filters:\n"); 3468 LIST_FOREACH(f, &vsi->ftl, ftle) 3469 ftl_len++; 3470 3471 if (ftl_len < 1) 3472 sbuf_printf(buf, "(none)\n"); 3473 else { 3474 LIST_FOREACH(f, &vsi->ftl, ftle) { 3475 sbuf_printf(buf, 3476 MAC_FORMAT ", vlan %4d, flags %#06x", 3477 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3478 /* don't print '\n' for last entry */ 3479 if (++ftl_counter != ftl_len) 3480 sbuf_printf(buf, "\n"); 3481 } 3482 } 3483 3484 #ifdef PCI_IOV 3485 /* TODO: Give each VF its own filter list sysctl */ 3486 struct ixl_vf *vf; 3487 if (pf->num_vfs > 0) { 3488 sbuf_printf(buf, "\n\n"); 3489 for (int i = 0; i < pf->num_vfs; i++) { 3490 vf = &pf->vfs[i]; 3491 if (!(vf->vf_flags & VF_FLAG_ENABLED)) 3492 continue; 3493 3494 vsi = &vf->vsi; 3495 ftl_len = 0, ftl_counter = 0; 3496 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num); 3497 LIST_FOREACH(f, &vsi->ftl, ftle) 3498 ftl_len++; 3499 3500 if (ftl_len < 1) 3501 sbuf_printf(buf, "(none)\n"); 3502 else { 3503 LIST_FOREACH(f, &vsi->ftl, ftle) { 3504 sbuf_printf(buf, 3505 MAC_FORMAT ", vlan %4d, flags %#06x\n", 3506 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3507 } 3508 } 3509 } 3510 } 3511 #endif 3512 3513 error = sbuf_finish(buf); 3514 if (error) 3515 device_printf(dev, "Error finishing sbuf: %d\n", error); 3516 sbuf_delete(buf); 3517 3518 return (error); 3519 } 3520 3521 #define IXL_SW_RES_SIZE 0x14 3522 int 3523 ixl_res_alloc_cmp(const void *a, 
const void *b) 3524 { 3525 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; 3526 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; 3527 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; 3528 3529 return ((int)one->resource_type - (int)two->resource_type); 3530 } 3531 3532 /* 3533 * Longest string length: 25 3534 */ 3535 const char * 3536 ixl_switch_res_type_string(u8 type) 3537 { 3538 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = { 3539 "VEB", 3540 "VSI", 3541 "Perfect Match MAC address", 3542 "S-tag", 3543 "(Reserved)", 3544 "Multicast hash entry", 3545 "Unicast hash entry", 3546 "VLAN", 3547 "VSI List entry", 3548 "(Reserved)", 3549 "VLAN Statistic Pool", 3550 "Mirror Rule", 3551 "Queue Set", 3552 "Inner VLAN Forward filter", 3553 "(Reserved)", 3554 "Inner MAC", 3555 "IP", 3556 "GRE/VN1 Key", 3557 "VN2 Key", 3558 "Tunneling Port" 3559 }; 3560 3561 if (type < IXL_SW_RES_SIZE) 3562 return ixl_switch_res_type_strings[type]; 3563 else 3564 return "(Reserved)"; 3565 } 3566 3567 static int 3568 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) 3569 { 3570 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3571 struct i40e_hw *hw = &pf->hw; 3572 device_t dev = pf->dev; 3573 struct sbuf *buf; 3574 enum i40e_status_code status; 3575 int error = 0; 3576 3577 u8 num_entries; 3578 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; 3579 3580 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3581 if (!buf) { 3582 device_printf(dev, "Could not allocate sbuf for output.\n"); 3583 return (ENOMEM); 3584 } 3585 3586 bzero(resp, sizeof(resp)); 3587 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, 3588 resp, 3589 IXL_SW_RES_SIZE, 3590 NULL); 3591 if (status) { 3592 device_printf(dev, 3593 "%s: get_switch_resource_alloc() error %s, aq error %s\n", 3594 __func__, i40e_stat_str(hw, status), 3595 i40e_aq_str(hw, hw->aq.asq_last_status)); 3596 sbuf_delete(buf); 3597 return (error); 3598 } 3599 3600 /* Sort entries by type for display */ 3601 qsort(resp, num_entries, 3602 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), 3603 &ixl_res_alloc_cmp); 3604 3605 sbuf_cat(buf, "\n"); 3606 sbuf_printf(buf, "# of entries: %d\n", num_entries); 3607 sbuf_printf(buf, 3608 " Type | Guaranteed | Total | Used | Un-allocated\n" 3609 " | (this) | (all) | (this) | (all) \n"); 3610 for (int i = 0; i < num_entries; i++) { 3611 sbuf_printf(buf, 3612 "%25s | %10d %5d %6d %12d", 3613 ixl_switch_res_type_string(resp[i].resource_type), 3614 resp[i].guaranteed, 3615 resp[i].total, 3616 resp[i].used, 3617 resp[i].total_unalloced); 3618 if (i < num_entries - 1) 3619 sbuf_cat(buf, "\n"); 3620 } 3621 3622 error = sbuf_finish(buf); 3623 if (error) 3624 device_printf(dev, "Error finishing sbuf: %d\n", error); 3625 3626 sbuf_delete(buf); 3627 return (error); 3628 } 3629 3630 enum ixl_sw_seid_offset { 3631 IXL_SW_SEID_EMP = 1, 3632 IXL_SW_SEID_MAC_START = 2, 3633 IXL_SW_SEID_MAC_END = 5, 3634 IXL_SW_SEID_PF_START = 16, 3635 IXL_SW_SEID_PF_END = 31, 3636 IXL_SW_SEID_VF_START = 32, 3637 IXL_SW_SEID_VF_END = 159, 3638 }; 3639 3640 /* 3641 * Caller must init and delete sbuf; this function will clear and 3642 * finish it for caller. 3643 * 3644 * Note: The SEID argument only applies for elements defined by FW at 3645 * power-on; these include the EMP, Ports, PFs and VFs. 
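 * For example, using enum ixl_sw_seid_offset above: SEID 1 prints as
 * "EMP", SEID 2 as "MAC  0", SEID 16 as "PF   0" and SEID 32 as
 * "VF   0"; SEIDs outside these ranges fall through to the
 * element_type switch below.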
3646 */ 3647 static char * 3648 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid) 3649 { 3650 sbuf_clear(s); 3651 3652 /* If SEID is in certain ranges, then we can infer the 3653 * mapping of SEID to switch element. 3654 */ 3655 if (seid == IXL_SW_SEID_EMP) { 3656 sbuf_cat(s, "EMP"); 3657 goto out; 3658 } else if (seid >= IXL_SW_SEID_MAC_START && 3659 seid <= IXL_SW_SEID_MAC_END) { 3660 sbuf_printf(s, "MAC %2d", 3661 seid - IXL_SW_SEID_MAC_START); 3662 goto out; 3663 } else if (seid >= IXL_SW_SEID_PF_START && 3664 seid <= IXL_SW_SEID_PF_END) { 3665 sbuf_printf(s, "PF %3d", 3666 seid - IXL_SW_SEID_PF_START); 3667 goto out; 3668 } else if (seid >= IXL_SW_SEID_VF_START && 3669 seid <= IXL_SW_SEID_VF_END) { 3670 sbuf_printf(s, "VF %3d", 3671 seid - IXL_SW_SEID_VF_START); 3672 goto out; 3673 } 3674 3675 switch (element_type) { 3676 case I40E_AQ_SW_ELEM_TYPE_BMC: 3677 sbuf_cat(s, "BMC"); 3678 break; 3679 case I40E_AQ_SW_ELEM_TYPE_PV: 3680 sbuf_cat(s, "PV"); 3681 break; 3682 case I40E_AQ_SW_ELEM_TYPE_VEB: 3683 sbuf_cat(s, "VEB"); 3684 break; 3685 case I40E_AQ_SW_ELEM_TYPE_PA: 3686 sbuf_cat(s, "PA"); 3687 break; 3688 case I40E_AQ_SW_ELEM_TYPE_VSI: 3689 sbuf_printf(s, "VSI"); 3690 break; 3691 default: 3692 sbuf_cat(s, "?"); 3693 break; 3694 } 3695 3696 out: 3697 sbuf_finish(s); 3698 return sbuf_data(s); 3699 } 3700 3701 static int 3702 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b) 3703 { 3704 const struct i40e_aqc_switch_config_element_resp *one, *two; 3705 one = (const struct i40e_aqc_switch_config_element_resp *)a; 3706 two = (const struct i40e_aqc_switch_config_element_resp *)b; 3707 3708 return ((int)one->seid - (int)two->seid); 3709 } 3710 3711 static int 3712 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) 3713 { 3714 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3715 struct i40e_hw *hw = &pf->hw; 3716 device_t dev = pf->dev; 3717 struct sbuf *buf; 3718 struct sbuf *nmbuf; 3719 enum i40e_status_code status; 3720 int error = 0; 3721 u16 next = 0; 3722 u8 aq_buf[I40E_AQ_LARGE_BUF]; 3723 3724 struct i40e_aqc_switch_config_element_resp *elem; 3725 struct i40e_aqc_get_switch_config_resp *sw_config; 3726 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 3727 3728 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3729 if (!buf) { 3730 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3731 return (ENOMEM); 3732 } 3733 3734 status = i40e_aq_get_switch_config(hw, sw_config, 3735 sizeof(aq_buf), &next, NULL); 3736 if (status) { 3737 device_printf(dev, 3738 "%s: aq_get_switch_config() error %s, aq error %s\n", 3739 __func__, i40e_stat_str(hw, status), 3740 i40e_aq_str(hw, hw->aq.asq_last_status)); 3741 sbuf_delete(buf); 3742 return error; 3743 } 3744 if (next) 3745 device_printf(dev, "%s: TODO: get more config with SEID %d\n", 3746 __func__, next); 3747 3748 nmbuf = sbuf_new_auto(); 3749 if (!nmbuf) { 3750 device_printf(dev, "Could not allocate sbuf for name output.\n"); 3751 sbuf_delete(buf); 3752 return (ENOMEM); 3753 } 3754 3755 /* Sort entries by SEID for display */ 3756 qsort(sw_config->element, sw_config->header.num_reported, 3757 sizeof(struct i40e_aqc_switch_config_element_resp), 3758 &ixl_sw_cfg_elem_seid_cmp); 3759 3760 sbuf_cat(buf, "\n"); 3761 /* Assuming <= 255 elements in switch */ 3762 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); 3763 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); 3764 /* Exclude: 3765 * Revision -- all elements are revision 1 for now 3766 */ 3767 
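/*
 * Example of one formatted row (hypothetical SEIDs and names):
 * " 390 (     VSI) |    2 (  MAC  0) |    0 (       ?) |        2"
 */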
static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	struct sbuf *nmbuf;
	enum i40e_status_code status;
	int error = 0;
	u16 next = 0;
	u8 aq_buf[I40E_AQ_LARGE_BUF];

	struct i40e_aqc_switch_config_element_resp *elem;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	status = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (status) {
		device_printf(dev,
		    "%s: aq_get_switch_config() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		return (EIO);
	}
	if (next)
		device_printf(dev, "%s: TODO: get more config with SEID %d\n",
		    __func__, next);

	nmbuf = sbuf_new_auto();
	if (!nmbuf) {
		device_printf(dev, "Could not allocate sbuf for name output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}

	/* Sort entries by SEID for display */
	qsort(sw_config->element, sw_config->header.num_reported,
	    sizeof(struct i40e_aqc_switch_config_element_resp),
	    &ixl_sw_cfg_elem_seid_cmp);

	sbuf_cat(buf, "\n");
	/* Assuming <= 255 elements in switch */
	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
	/*
	 * Exclude:
	 * Revision -- all elements are revision 1 for now
	 */
	sbuf_printf(buf,
	    "SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
	    "                |                 |                 |  (uplink)\n");
	for (int i = 0; i < sw_config->header.num_reported; i++) {
		elem = &sw_config->element[i];

		sbuf_printf(buf, "%4d", elem->seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    elem->element_type, elem->seid));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%4d", elem->uplink_seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    0, elem->uplink_seid));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%4d", elem->downlink_seid);
		sbuf_cat(buf, " ");
		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
		    0, elem->downlink_seid));
		sbuf_cat(buf, " | ");
		sbuf_printf(buf, "%8d", elem->connection_type);
		if (i < sw_config->header.num_reported - 1)
			sbuf_cat(buf, "\n");
	}
	sbuf_delete(nmbuf);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);

	return (error);
}

static int
ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_vlan = -1;
	enum i40e_status_code status = 0;
	int error = 0;

	error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
		device_printf(dev, "Device flags do not allow changing switch VLAN tags\n");
		return (ENODEV);
	}

	/* The switch tag is a 16-bit TPID */
	if (requested_vlan < 0 || requested_vlan > 0xFFFF) {
		device_printf(dev, "Valid tag values are 0-65535\n");
		return (EINVAL);
	}

	hw->switch_tag = requested_vlan;
	device_printf(dev,
	    "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
	    hw->switch_tag, hw->first_tag, hw->second_tag);
	status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
	if (status) {
		device_printf(dev,
		    "%s: aq_set_switch_config() error %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}
	return (0);
}

static int
ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;
	u32 reg;

	struct i40e_aqc_get_set_rss_key_data key_data;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(&key_data, sizeof(key_data));

	sbuf_cat(buf, "\n");
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
		if (status)
			device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
			reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
			bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
		}
	}

	ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
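/*
 * Minimal sketch of the register-gather pattern the hkey handler above
 * uses on non-X722 parts: each 32-bit read lands at byte offset
 * (i << 2), i.e. i * 4, so 'count' reads fill a count * 4 byte buffer in
 * the order the registers return it.  The function and its 'regaddr'
 * callback (standing in for a register macro such as I40E_PFQF_HKEY) are
 * hypothetical.
 */
static void __unused
ixl_example_gather_regs(struct i40e_hw *hw, u32 (*regaddr)(int),
    u8 *out, int count)
{
	u32 reg;

	for (int i = 0; i < count; i++) {
		reg = i40e_read_rx_ctl(hw, regaddr(i));
		bcopy(&reg, out + (i << 2), 4);
	}
}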
static void
ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
{
	int i, j, k, width;
	char c;

	if (length < 1 || buf == NULL)
		return;

	int byte_stride = 16;
	int lines = length / byte_stride;
	int rem = length % byte_stride;
	if (rem > 0)
		lines++;

	for (i = 0; i < lines; i++) {
		width = (rem > 0 && i == lines - 1)
		    ? rem : byte_stride;

		sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);

		for (j = 0; j < width; j++)
			sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);

		if (width < byte_stride) {
			for (k = 0; k < (byte_stride - width); k++)
				sbuf_printf(sb, "   ");
		}

		if (!text) {
			sbuf_printf(sb, "\n");
			continue;
		}

		for (j = 0; j < width; j++) {
			c = (char)buf[i * byte_stride + j];
			if (c < 32 || c > 126)
				sbuf_printf(sb, ".");
			else
				sbuf_printf(sb, "%c", c);

			if (j == width - 1)
				sbuf_printf(sb, "\n");
		}
	}
}

static int
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;
	u8 hlut[512];
	u32 reg;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	bzero(hlut, sizeof(hlut));
	sbuf_cat(buf, "\n");
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
		if (status)
			device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
			reg = rd32(hw, I40E_PFQF_HLUT(i));
			bcopy(&reg, &hlut[i << 2], 4);
		}
	}
	ixl_sbuf_print_bytes(buf, hlut, sizeof(hlut), 0, false);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}

static int
ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	u64 hena;

	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);

	return sysctl_handle_long(oidp, NULL, hena, req);
}
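/*
 * Sketch of the 64-bit split used by the HENA handler above: the hash
 * enable mask lives in two 32-bit registers, low word first.  The
 * helpers below show the round trip; their names are hypothetical and
 * they are not used by the driver.
 */
static u64 __unused
ixl_example_join_u64(u32 lo, u32 hi)
{
	return ((u64)lo | ((u64)hi << 32));
}

static void __unused
ixl_example_split_u64(u64 v, u32 *lo, u32 *hi)
{
	*lo = (u32)(v & 0xFFFFFFFF);
	*hi = (u32)(v >> 32);
}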
/*
 * Sysctl to disable firmware's link management
 *
 * 1 - Disable link management on this port
 * 0 - Re-enable link management
 *
 * On normal NVMs, firmware manages link by default.
 */
static int
ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int requested_mode = -1;
	enum i40e_status_code status = 0;
	int error = 0;

	/* Read in new mode */
	error = sysctl_handle_int(oidp, &requested_mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Check for sane value */
	if (requested_mode < 0 || requested_mode > 1) {
		device_printf(dev, "Valid modes are 0 or 1\n");
		return (EINVAL);
	}

	/* Set new mode */
	status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
	if (status) {
		device_printf(dev,
		    "%s: Error setting new phy debug mode %s,"
		    " aq error: %s\n", __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}

	return (0);
}

/*
 * Read some diagnostic data from a (Q)SFP+ module
 *
 *             SFP A2    QSFP Lower Page
 * Temperature 96-97     22-23
 * Vcc         98-99     26-27
 * TX power    102-103   34-35..40-41
 * RX power    104-105   50-51..56-57
 */
static int
ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	struct sbuf *sbuf;
	int error = 0;
	u8 output;

	if (req->oldptr == NULL) {
		error = SYSCTL_OUT(req, 0, 128);
		return (error);
	}

	error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
	if (error) {
		device_printf(dev, "Error reading from i2c\n");
		return (error);
	}

	/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
	if (output == 0x3) {
		/*
		 * Check for:
		 * - Internally calibrated data
		 * - Diagnostic monitoring is implemented
		 */
		pf->read_i2c_byte(pf, 92, 0xA0, &output);
		if (!(output & 0x60)) {
			device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
			return (0);
		}

		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sbuf == NULL) {
			device_printf(dev, "Could not allocate sbuf for output.\n");
			return (ENOMEM);
		}

		for (u8 offset = 96; offset < 100; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA2, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
		for (u8 offset = 102; offset < 106; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA2, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
	} else if (output == 0xD || output == 0x11) {
		/*
		 * QSFP+ modules are always internally calibrated, and must indicate
		 * what types of diagnostic monitoring are implemented
		 */
		sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sbuf == NULL) {
			device_printf(dev, "Could not allocate sbuf for output.\n");
			return (ENOMEM);
		}

		for (u8 offset = 22; offset < 24; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA0, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
		for (u8 offset = 26; offset < 28; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA0, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
		/* Read the data from the first lane */
		for (u8 offset = 34; offset < 36; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA0, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
		for (u8 offset = 50; offset < 52; offset++) {
			pf->read_i2c_byte(pf, offset, 0xA0, &output);
			sbuf_printf(sbuf, "%02X ", output);
		}
	} else {
		device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
		return (0);
	}

	sbuf_finish(sbuf);
	sbuf_delete(sbuf);

	return (0);
}
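/*
 * Decoding sketch for the raw bytes the handler above emits, following
 * SFF-8472 (SFP A2 page) conventions as I understand them: temperature
 * at offsets 96-97 is a big-endian signed 16-bit value in units of
 * 1/256 degC, and Vcc at 98-99 is big-endian unsigned in units of
 * 100 uV.  These helpers are hypothetical and unused; verify the
 * scaling against the module's spec before relying on them.
 */
static int __unused
ixl_example_sff_temp_mdegc(u8 msb, u8 lsb)
{
	int16_t raw = (int16_t)(((u16)msb << 8) | lsb);

	/* 1/256 degC per LSB -> millidegrees C */
	return ((int)raw * 1000 / 256);
}

static u32 __unused
ixl_example_sff_vcc_uv(u8 msb, u8 lsb)
{
	/* 100 uV per LSB -> microvolts */
	return ((u32)(((u16)msb << 8) | lsb) * 100);
}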
/*
 * Sysctl to read a byte from the I2C bus.
 *
 * Input: 32-bit value:
 *	bits 0-7:   device address (0xA0 or 0xA2)
 *	bits 8-15:  offset (0-255)
 *	bits 16-31: unused
 * Output: 8-bit value read
 */
static int
ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int input = -1, error = 0;
	u8 dev_addr, offset, output;

	/* Read in I2C read parameters */
	error = sysctl_handle_int(oidp, &input, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Validate device address */
	dev_addr = input & 0xFF;
	if (dev_addr != 0xA0 && dev_addr != 0xA2)
		return (EINVAL);
	offset = (input >> 8) & 0xFF;

	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
	if (error)
		return (error);

	device_printf(dev, "%02X\n", output);
	return (0);
}

/*
 * Sysctl to write a byte to the I2C bus.
 *
 * Input: 32-bit value:
 *	bits 0-7:   device address (0xA0 or 0xA2)
 *	bits 8-15:  offset (0-255)
 *	bits 16-23: value to write
 *	bits 24-31: unused
 * Output: 8-bit value written
 */
static int
ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int input = -1, error = 0;
	u8 dev_addr, offset, value;

	/* Read in I2C write parameters */
	error = sysctl_handle_int(oidp, &input, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Validate device address */
	dev_addr = input & 0xFF;
	if (dev_addr != 0xA0 && dev_addr != 0xA2)
		return (EINVAL);
	offset = (input >> 8) & 0xFF;
	value = (input >> 16) & 0xFF;

	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
	if (error)
		return (error);

	device_printf(dev, "%02X written\n", value);
	return (0);
}
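/*
 * Packing sketch for the sysctl input word the two handlers above
 * consume.  For example, writing value 0x55 to offset 0x7F of device
 * 0xA2 uses the input (0x55 << 16) | (0x7F << 8) | 0xA2.  The helper
 * name is hypothetical and the function is unused by the driver.
 */
static int __unused
ixl_example_pack_i2c_input(u8 dev_addr, u8 offset, u8 value)
{
	return (((int)value << 16) | ((int)offset << 8) | dev_addr);
}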
static int
ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int *is_set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return (EIO);

	status = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, abilities, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}

	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
	return (0);
}

static int
ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code status;

	/* Set new PHY config */
	memset(&config, 0, sizeof(config));
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
	if (set)
		config.fec_config |= bit_pos;
	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		config.phy_type = abilities->phy_type;
		config.phy_type_ext = abilities->phy_type_ext;
		config.link_speed = abilities->link_speed;
		config.eee_capability = abilities->eee_capability;
		config.eeer = abilities->eeer_val;
		config.low_power_ctrl = abilities->d3_lpan;
		status = i40e_aq_set_phy_config(hw, &config, NULL);

		if (status) {
			device_printf(dev,
			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
			    __func__, i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			return (EIO);
		}
	}

	return (0);
}

static int
ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
}

static int
ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
}

static int
ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
}

static int
ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
}

static int
ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
}
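/*
 * Minimal sketch of the read-modify-write pattern ixl_set_fec_config()
 * above applies to the FEC configuration byte: clear the requested bit,
 * set it back if asked to, and only report a change when the result
 * differs from the current value.  Names are hypothetical and the
 * function is unused.
 */
static bool __unused
ixl_example_fec_bit_update(u8 curr, u8 bit_pos, int set, u8 *out)
{
	u8 next = curr & ~bit_pos;

	if (set)
		next |= bit_pos;
	*out = next;
	/* true means an AQ update would be needed */
	return (next != curr);
}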
static int
ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	u8 *final_buff;
	/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
	if (final_buff == NULL) {
		device_printf(dev, "Could not allocate memory for output.\n");
		sbuf_delete(buf);
		return (ENOMEM);
	}
	int final_buff_len = 0;

	u8 cluster_id = 1;
	bool more = true;

	u8 dump_buf[4096];
	u16 curr_buff_size = 4096;
	u8 curr_next_table = 0;
	u32 curr_next_index = 0;

	u16 ret_buff_size;
	u8 ret_next_table;
	u32 ret_next_index;

	sbuf_cat(buf, "\n");

	while (more) {
		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table,
		    curr_next_index, curr_buff_size,
		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
		if (status) {
			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			goto free_out;
		}

		/* Copy data out of the temp buffer, guarding against overflow */
		if (final_buff_len + ret_buff_size > IXL_FINAL_BUFF_SIZE) {
			device_printf(dev, "%s: dump exceeds %d byte buffer\n",
			    __func__, IXL_FINAL_BUFF_SIZE);
			goto free_out;
		}
		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
		final_buff_len += ret_buff_size;

		if (ret_next_table != curr_next_table) {
			/* We're done with the current table; we can dump out read data. */
			sbuf_printf(buf, "%d:", curr_next_table);
			int bytes_printed = 0;
			while (bytes_printed < final_buff_len) {
				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
				bytes_printed += 16;
			}
			sbuf_cat(buf, "\n");

			/* The entire cluster has been read; we're finished */
			if (ret_next_table == 0xFF)
				break;

			/* Otherwise clear the output buffer and continue reading */
			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
			final_buff_len = 0;
		}

		if (ret_next_index == 0xFFFFFFFF)
			ret_next_index = 0;

		bzero(dump_buf, sizeof(dump_buf));
		curr_next_table = ret_next_table;
		curr_next_index = ret_next_index;
	}

free_out:
	free(final_buff, M_IXL);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
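/*
 * Shape of the paged-read loop used by ixl_sysctl_dump_debug_data()
 * above, reduced to its control flow: keep a (table, index) cursor,
 * resubmit with whatever the firmware hands back, and stop at the 0xFF
 * sentinel table.  The 'fetch' callback stands in for
 * i40e_aq_debug_dump(); both it and this function are hypothetical and
 * unused, and buffer handling is omitted.
 */
static void __unused
ixl_example_paged_read(int (*fetch)(u8 table, u32 index, u8 *next_table,
    u32 *next_index))
{
	u8 table = 0, next_table;
	u32 index = 0, next_index;

	for (;;) {
		if (fetch(table, index, &next_table, &next_index) != 0)
			break;			/* request failed; give up */
		if (next_table == 0xFF)
			break;			/* sentinel: cluster exhausted */
		if (next_index == 0xFFFFFFFF)
			next_index = 0;		/* restart index in the new table */
		table = next_table;
		index = next_index;
	}
}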
Set the \"LLDP Agent\" UEFI HII " 4443 "attribute to \"Enabled\" to use this sysctl\n"); 4444 return (EINVAL); 4445 default: 4446 device_printf(pf->dev, 4447 "Starting FW LLDP agent failed: error: %s, %s\n", 4448 i40e_stat_str(hw, status), 4449 i40e_aq_str(hw, hw->aq.asq_last_status)); 4450 return (EINVAL); 4451 } 4452 } 4453 4454 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4455 return (0); 4456 } 4457 4458 static int 4459 ixl_stop_fw_lldp(struct ixl_pf *pf) 4460 { 4461 struct i40e_hw *hw = &pf->hw; 4462 device_t dev = pf->dev; 4463 enum i40e_status_code status; 4464 4465 if (hw->func_caps.npar_enable != 0) { 4466 device_printf(dev, 4467 "Disabling FW LLDP agent is not supported on this device\n"); 4468 return (EINVAL); 4469 } 4470 4471 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 4472 device_printf(dev, 4473 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); 4474 return (EINVAL); 4475 } 4476 4477 status = i40e_aq_stop_lldp(hw, true, false, NULL); 4478 if (status != I40E_SUCCESS) { 4479 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 4480 device_printf(dev, 4481 "Disabling FW LLDP agent failed: error: %s, %s\n", 4482 i40e_stat_str(hw, status), 4483 i40e_aq_str(hw, hw->aq.asq_last_status)); 4484 return (EINVAL); 4485 } 4486 4487 device_printf(dev, "FW LLDP agent is already stopped\n"); 4488 } 4489 4490 i40e_aq_set_dcb_parameters(hw, true, NULL); 4491 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4492 return (0); 4493 } 4494 4495 static int 4496 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 4497 { 4498 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4499 int state, new_state, error = 0; 4500 4501 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); 4502 4503 /* Read in new mode */ 4504 error = sysctl_handle_int(oidp, &new_state, 0, req); 4505 if ((error) || (req->newptr == NULL)) 4506 return (error); 4507 4508 /* Already in requested state */ 4509 if (new_state == state) 4510 return (error); 4511 4512 if (new_state == 0) 4513 return ixl_stop_fw_lldp(pf); 4514 4515 return ixl_start_fw_lldp(pf); 4516 } 4517 4518 static int 4519 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4520 { 4521 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4522 int state, new_state; 4523 int sysctl_handle_status = 0; 4524 enum i40e_status_code cmd_status; 4525 4526 /* Init states' values */ 4527 state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED)); 4528 4529 /* Get requested mode */ 4530 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); 4531 if ((sysctl_handle_status) || (req->newptr == NULL)) 4532 return (sysctl_handle_status); 4533 4534 /* Check if state has changed */ 4535 if (new_state == state) 4536 return (0); 4537 4538 /* Set new state */ 4539 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); 4540 4541 /* Save new state or report error */ 4542 if (!cmd_status) { 4543 if (new_state == 0) 4544 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4545 else 4546 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4547 } else if (cmd_status == I40E_ERR_CONFIG) 4548 return (EPERM); 4549 else 4550 return (EIO); 4551 4552 return (0); 4553 } 4554 4555 static int 4556 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS) 4557 { 4558 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4559 int error, state; 4560 4561 state = !!(atomic_load_acq_32(&pf->state) & 4562 IXL_PF_STATE_LINK_ACTIVE_ON_DOWN); 4563 4564 error = sysctl_handle_int(oidp, &state, 0, req); 4565 if ((error) || (req->newptr == NULL)) 
static int
ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int error, state;

	state = !!(atomic_load_acq_32(&pf->state) &
	    IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);

	error = sysctl_handle_int(oidp, &state, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (state == 0)
		atomic_clear_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
	else
		atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);

	return (0);
}

int
ixl_attach_get_link_status(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int error = 0;

	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
			return (error);
		}
	}

	/* Determine link state */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);

	/* If the user did not set a flow control mode, adopt the current FW setting */
	if (pf->fc == -1)
		pf->fc = hw->fc.current_mode;

	return (0);
}

static int
ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int requested = 0, error = 0;

	/* Read in the requested value; any write triggers the reset */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Initiate the PF reset later in the admin task */
	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);

	return (error);
}

static int
ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int requested = 0, error = 0;

	/* Read in the requested value; any write triggers the reset */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);

	return (error);
}

static int
ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int requested = 0, error = 0;

	/* Read in the requested value; any write triggers the reset */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);

	return (error);
}

/*
 * Print out the mapping of Tx and Rx queue indexes
 * to MSI-X vectors.
 */
static int
ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	struct ixl_rx_queue *rx_que;
	struct ixl_tx_queue *tx_que;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		rx_que = &vsi->rx_queues[i];
		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
	}
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		tx_que = &vsi->tx_queues[i];
		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
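/*
 * The FW version gate in ixl_attach_get_link_status() above checks for
 * firmware "older than 4.33".  A generic sketch of that comparison,
 * with a hypothetical name; the driver does not use this helper.
 */
static bool __unused
ixl_example_fw_older_than(struct i40e_hw *hw, u16 maj, u16 min)
{
	return (hw->aq.fw_maj_ver < maj ||
	    (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver < min));
}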