/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static const char *	ixl_link_speed_string(enum i40e_aq_link_speed);
static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
static enum ixl_fw_mode	ixl_get_fw_mode(struct ixl_pf *);

/* Sysctls */
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}
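
/*
 * Example of the string built above, with hypothetical version values
 * (the exact numbers depend on the NVM image on the adapter):
 *
 *	fw 6.0.48442 api 1.7 nvm 6.01 etid 80003554 oem 1.263.0
 */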
/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify the state of the FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else {
		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
			fw_mode = IXL_FW_MODE_UEMPR;
	}
	return (fw_mode);
}

/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success, or an error code on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/* Check FW mode again in case it has changed while
	 * waiting for reset to complete */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}

/**
 * ixl_setup_hmc - Setup LAN Host Memory Cache
 * @pf: PF structure
 *
 * Init and configure LAN Host Memory Cache
 *
 * @returns 0 on success, EIO on error
 */
int
ixl_setup_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	return (0);
}
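
/*
 * Sketch of how the helpers above and below are expected to be used
 * during device setup and teardown (the real attach/detach paths live
 * elsewhere in the driver; error handling is omitted here):
 *
 *	if (ixl_pf_reset(pf) == 0 && !IXL_PF_IN_RECOVERY_MODE(pf))
 *		error = ixl_setup_hmc(pf);
 *	...
 *	ixl_shutdown_hmc(pf);	(on teardown)
 */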
275 * 276 */ 277 void 278 ixl_shutdown_hmc(struct ixl_pf *pf) 279 { 280 struct i40e_hw *hw = &pf->hw; 281 enum i40e_status_code status; 282 283 /* HMC not configured, no need to shutdown */ 284 if (hw->hmc.hmc_obj == NULL) 285 return; 286 287 status = i40e_shutdown_lan_hmc(hw); 288 if (status) 289 device_printf(pf->dev, 290 "Shutdown LAN HMC failed with code %s\n", 291 i40e_stat_str(hw, status)); 292 } 293 /* 294 * Write PF ITR values to queue ITR registers. 295 */ 296 void 297 ixl_configure_itr(struct ixl_pf *pf) 298 { 299 ixl_configure_tx_itr(pf); 300 ixl_configure_rx_itr(pf); 301 } 302 303 /********************************************************************* 304 * 305 * Get the hardware capabilities 306 * 307 **********************************************************************/ 308 309 int 310 ixl_get_hw_capabilities(struct ixl_pf *pf) 311 { 312 struct i40e_aqc_list_capabilities_element_resp *buf; 313 struct i40e_hw *hw = &pf->hw; 314 device_t dev = pf->dev; 315 enum i40e_status_code status; 316 int len, i2c_intfc_num; 317 bool again = TRUE; 318 u16 needed; 319 320 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 321 hw->func_caps.iwarp = 0; 322 return (0); 323 } 324 325 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 326 retry: 327 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) 328 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) { 329 device_printf(dev, "Unable to allocate cap memory\n"); 330 return (ENOMEM); 331 } 332 333 /* This populates the hw struct */ 334 status = i40e_aq_discover_capabilities(hw, buf, len, 335 &needed, i40e_aqc_opc_list_func_capabilities, NULL); 336 free(buf, M_DEVBUF); 337 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && 338 (again == TRUE)) { 339 /* retry once with a larger buffer */ 340 again = FALSE; 341 len = needed; 342 goto retry; 343 } else if (status != I40E_SUCCESS) { 344 device_printf(dev, "capability discovery failed; status %s, error %s\n", 345 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 346 return (ENODEV); 347 } 348 349 /* 350 * Some devices have both MDIO and I2C; since this isn't reported 351 * by the FW, check registers to see if an I2C interface exists. 352 */ 353 i2c_intfc_num = ixl_find_i2c_interface(pf); 354 if (i2c_intfc_num != -1) 355 pf->has_i2c = true; 356 357 /* Determine functions to use for driver I2C accesses */ 358 switch (pf->i2c_access_method) { 359 case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: { 360 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 361 pf->read_i2c_byte = ixl_read_i2c_byte_aq; 362 pf->write_i2c_byte = ixl_write_i2c_byte_aq; 363 } else { 364 pf->read_i2c_byte = ixl_read_i2c_byte_reg; 365 pf->write_i2c_byte = ixl_write_i2c_byte_reg; 366 } 367 break; 368 } 369 case IXL_I2C_ACCESS_METHOD_AQ: 370 pf->read_i2c_byte = ixl_read_i2c_byte_aq; 371 pf->write_i2c_byte = ixl_write_i2c_byte_aq; 372 break; 373 case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD: 374 pf->read_i2c_byte = ixl_read_i2c_byte_reg; 375 pf->write_i2c_byte = ixl_write_i2c_byte_reg; 376 break; 377 case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS: 378 pf->read_i2c_byte = ixl_read_i2c_byte_bb; 379 pf->write_i2c_byte = ixl_write_i2c_byte_bb; 380 break; 381 default: 382 /* Should not happen */ 383 device_printf(dev, "Error setting I2C access functions\n"); 384 break; 385 } 386 387 /* Print a subset of the capability information. 
	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
		    __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

/* Callback for if_foreach_llmaddr(); adds one link-level multicast address. */
static u_int
ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_vsi *vsi = arg;

	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));

	return (1);
}

/*********************************************************************
 * 	Filter Routines
 *
 * 	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
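
/*
 * Software filter entries carry a small flag-based state machine, as
 * used by the routines below: IXL_FILTER_ADD marks an entry that still
 * needs to be pushed to the hardware, IXL_FILTER_USED marks a live
 * entry, IXL_FILTER_MC marks multicast entries, and IXL_FILTER_DEL
 * queues an entry for removal by ixl_del_hw_filters().
 */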
482 */ 483 mcnt = if_llmaddr_count(ifp); 484 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { 485 /* delete existing MC filters */ 486 ixl_del_hw_filters(vsi, mcnt); 487 i40e_aq_set_vsi_multicast_promiscuous(hw, 488 vsi->seid, TRUE, NULL); 489 return; 490 } 491 492 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi); 493 if (mcnt > 0) { 494 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); 495 ixl_add_hw_filters(vsi, flags, mcnt); 496 } 497 498 IOCTL_DEBUGOUT("ixl_add_multi: end"); 499 } 500 501 static u_int 502 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 503 { 504 struct ixl_mac_filter *f = arg; 505 506 if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl))) 507 return (1); 508 else 509 return (0); 510 } 511 512 int 513 ixl_del_multi(struct ixl_vsi *vsi) 514 { 515 struct ifnet *ifp = vsi->ifp; 516 struct ixl_mac_filter *f; 517 int mcnt = 0; 518 519 IOCTL_DEBUGOUT("ixl_del_multi: begin"); 520 521 /* Search for removed multicast addresses */ 522 SLIST_FOREACH(f, &vsi->ftl, next) 523 if ((f->flags & IXL_FILTER_USED) && 524 (f->flags & IXL_FILTER_MC) && 525 (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) { 526 f->flags |= IXL_FILTER_DEL; 527 mcnt++; 528 } 529 530 if (mcnt > 0) 531 ixl_del_hw_filters(vsi, mcnt); 532 533 return (mcnt); 534 } 535 536 void 537 ixl_link_up_msg(struct ixl_pf *pf) 538 { 539 struct i40e_hw *hw = &pf->hw; 540 struct ifnet *ifp = pf->vsi.ifp; 541 char *req_fec_string, *neg_fec_string; 542 u8 fec_abilities; 543 544 fec_abilities = hw->phy.link_info.req_fec_info; 545 /* If both RS and KR are requested, only show RS */ 546 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) 547 req_fec_string = ixl_fec_string[0]; 548 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) 549 req_fec_string = ixl_fec_string[1]; 550 else 551 req_fec_string = ixl_fec_string[2]; 552 553 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) 554 neg_fec_string = ixl_fec_string[0]; 555 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) 556 neg_fec_string = ixl_fec_string[1]; 557 else 558 neg_fec_string = ixl_fec_string[2]; 559 560 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 561 ifp->if_xname, 562 ixl_link_speed_string(hw->phy.link_info.link_speed), 563 req_fec_string, neg_fec_string, 564 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", 565 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && 566 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 567 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? 568 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 569 ixl_fc_string[1] : ixl_fc_string[0]); 570 } 571 572 /* 573 * Configure admin queue/misc interrupt cause registers in hardware. 574 */ 575 void 576 ixl_configure_intr0_msix(struct ixl_pf *pf) 577 { 578 struct i40e_hw *hw = &pf->hw; 579 u32 reg; 580 581 /* First set up the adminq - vector 0 */ 582 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 583 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 584 585 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 586 I40E_PFINT_ICR0_ENA_GRST_MASK | 587 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 588 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 589 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 590 I40E_PFINT_ICR0_ENA_VFLR_MASK | 591 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 592 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; 593 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 594 595 /* 596 * 0x7FF is the end of the queue list. 
/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

void
ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8 aq_buf[I40E_AQ_LARGE_BUF];
	int ret;
	u16 next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}

void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter *f;

	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}

	vsi->num_hw_filters = 0;
}

void
ixl_vsi_add_sysctls(struct ixl_vsi *vsi, const char *sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}
776 */ 777 static int 778 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS) 779 { 780 struct ixl_pf *pf = (struct ixl_pf *)arg1; 781 device_t dev = pf->dev; 782 int error = 0; 783 int requested_tx_itr; 784 785 requested_tx_itr = pf->tx_itr; 786 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); 787 if ((error) || (req->newptr == NULL)) 788 return (error); 789 if (pf->dynamic_tx_itr) { 790 device_printf(dev, 791 "Cannot set TX itr value while dynamic TX itr is enabled\n"); 792 return (EINVAL); 793 } 794 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { 795 device_printf(dev, 796 "Invalid TX itr value; value must be between 0 and %d\n", 797 IXL_MAX_ITR); 798 return (EINVAL); 799 } 800 801 pf->tx_itr = requested_tx_itr; 802 ixl_configure_tx_itr(pf); 803 804 return (error); 805 } 806 807 /* 808 * Used to set the Rx ITR value for all of the PF LAN VSI's queues. 809 * Writes to the ITR registers immediately. 810 */ 811 static int 812 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS) 813 { 814 struct ixl_pf *pf = (struct ixl_pf *)arg1; 815 device_t dev = pf->dev; 816 int error = 0; 817 int requested_rx_itr; 818 819 requested_rx_itr = pf->rx_itr; 820 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); 821 if ((error) || (req->newptr == NULL)) 822 return (error); 823 if (pf->dynamic_rx_itr) { 824 device_printf(dev, 825 "Cannot set RX itr value while dynamic RX itr is enabled\n"); 826 return (EINVAL); 827 } 828 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { 829 device_printf(dev, 830 "Invalid RX itr value; value must be between 0 and %d\n", 831 IXL_MAX_ITR); 832 return (EINVAL); 833 } 834 835 pf->rx_itr = requested_rx_itr; 836 ixl_configure_rx_itr(pf); 837 838 return (error); 839 } 840 841 void 842 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, 843 struct sysctl_oid_list *child, 844 struct i40e_hw_port_stats *stats) 845 { 846 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, 847 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics"); 848 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); 849 850 struct i40e_eth_stats *eth_stats = &stats->eth; 851 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); 852 853 struct ixl_sysctl_info ctls[] = 854 { 855 {&stats->crc_errors, "crc_errors", "CRC Errors"}, 856 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, 857 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, 858 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, 859 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"}, 860 /* Packet Reception Stats */ 861 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, 862 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, 863 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, 864 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, 865 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, 866 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, 867 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, 868 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, 869 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, 870 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, 871 {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, 872 {&stats->checksum_error, "checksum_errors", "Checksum Errors"}, 873 /* Packet Transmission Stats 
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *)&rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		/*
		 * 52 bytes covers both the standard (40-byte) and
		 * extended (12-byte) hash key fields of the AQ structure.
		 */
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}
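
/*
 * Each RSS-enabled packet classifier type (PCTYPE) is a single bit in
 * a 64-bit hash-enable (HENA) mask, which the next routine assembles
 * and writes across the two 32-bit I40E_PFQF_HENA registers.
 */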
933 */ 934 void 935 ixl_set_rss_pctypes(struct ixl_pf *pf) 936 { 937 struct i40e_hw *hw = &pf->hw; 938 u64 set_hena = 0, hena; 939 940 #ifdef RSS 941 u32 rss_hash_config; 942 943 rss_hash_config = rss_gethashconfig(); 944 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 945 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); 946 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 947 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); 948 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 949 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); 950 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 951 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); 952 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 953 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); 954 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 955 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); 956 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 957 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); 958 #else 959 if (hw->mac.type == I40E_MAC_X722) 960 set_hena = IXL_DEFAULT_RSS_HENA_X722; 961 else 962 set_hena = IXL_DEFAULT_RSS_HENA_XL710; 963 #endif 964 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 965 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 966 hena |= set_hena; 967 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); 968 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 969 970 } 971 972 /* 973 ** Setup the PF's RSS parameters. 974 */ 975 void 976 ixl_config_rss(struct ixl_pf *pf) 977 { 978 ixl_set_rss_key(pf); 979 ixl_set_rss_pctypes(pf); 980 ixl_set_rss_hlut(pf); 981 } 982 983 /* 984 * In some firmware versions there is default MAC/VLAN filter 985 * configured which interferes with filters managed by driver. 986 * Make sure it's removed. 987 */ 988 void 989 ixl_del_default_hw_filters(struct ixl_vsi *vsi) 990 { 991 struct i40e_aqc_remove_macvlan_element_data e; 992 993 bzero(&e, sizeof(e)); 994 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); 995 e.vlan_tag = 0; 996 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 997 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); 998 999 bzero(&e, sizeof(e)); 1000 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); 1001 e.vlan_tag = 0; 1002 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | 1003 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 1004 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); 1005 } 1006 1007 /* 1008 ** Initialize filter list and add filters that the hardware 1009 ** needs to know about. 1010 ** 1011 ** Requires VSI's seid to be set before calling. 1012 */ 1013 void 1014 ixl_init_filters(struct ixl_vsi *vsi) 1015 { 1016 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 1017 1018 ixl_dbg_filter(pf, "%s: start\n", __func__); 1019 1020 /* Initialize mac filter list for VSI */ 1021 SLIST_INIT(&vsi->ftl); 1022 vsi->num_hw_filters = 0; 1023 1024 /* Receive broadcast Ethernet frames */ 1025 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL); 1026 1027 if (IXL_VSI_IS_VF(vsi)) 1028 return; 1029 1030 ixl_del_default_hw_filters(vsi); 1031 1032 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY); 1033 1034 /* 1035 * Prevent Tx flow control frames from being sent out by 1036 * non-firmware transmitters. 1037 * This affects every VSI in the PF. 
1038 */ 1039 #ifndef IXL_DEBUG_FC 1040 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); 1041 #else 1042 if (pf->enable_tx_fc_filter) 1043 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); 1044 #endif 1045 } 1046 1047 /* 1048 ** This routine adds mulicast filters 1049 */ 1050 void 1051 ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) 1052 { 1053 struct ixl_mac_filter *f; 1054 1055 /* Does one already exist */ 1056 f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); 1057 if (f != NULL) 1058 return; 1059 1060 f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY); 1061 if (f != NULL) 1062 f->flags |= IXL_FILTER_MC; 1063 else 1064 printf("WARNING: no filter available!!\n"); 1065 } 1066 1067 void 1068 ixl_reconfigure_filters(struct ixl_vsi *vsi) 1069 { 1070 ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs); 1071 } 1072 1073 /* 1074 * This routine adds a MAC/VLAN filter to the software filter 1075 * list, then adds that new filter to the HW if it doesn't already 1076 * exist in the SW filter list. 1077 */ 1078 void 1079 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 1080 { 1081 struct ixl_mac_filter *f, *tmp; 1082 struct ixl_pf *pf; 1083 device_t dev; 1084 1085 pf = vsi->back; 1086 dev = pf->dev; 1087 1088 ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n", 1089 MAC_FORMAT_ARGS(macaddr), vlan); 1090 1091 /* Does one already exist */ 1092 f = ixl_find_filter(vsi, macaddr, vlan); 1093 if (f != NULL) 1094 return; 1095 /* 1096 ** Is this the first vlan being registered, if so we 1097 ** need to remove the ANY filter that indicates we are 1098 ** not in a vlan, and replace that with a 0 filter. 1099 */ 1100 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { 1101 tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); 1102 if (tmp != NULL) { 1103 ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); 1104 ixl_add_filter(vsi, macaddr, 0); 1105 } 1106 } 1107 1108 f = ixl_new_filter(vsi, macaddr, vlan); 1109 if (f == NULL) { 1110 device_printf(dev, "WARNING: no filter available!!\n"); 1111 return; 1112 } 1113 if (f->vlan != IXL_VLAN_ANY) 1114 f->flags |= IXL_FILTER_VLAN; 1115 else 1116 vsi->num_macs++; 1117 1118 f->flags |= IXL_FILTER_USED; 1119 ixl_add_hw_filters(vsi, f->flags, 1); 1120 } 1121 1122 void 1123 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 1124 { 1125 struct ixl_mac_filter *f; 1126 1127 ixl_dbg_filter((struct ixl_pf *)vsi->back, 1128 "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n", 1129 MAC_FORMAT_ARGS(macaddr), vlan); 1130 1131 f = ixl_find_filter(vsi, macaddr, vlan); 1132 if (f == NULL) 1133 return; 1134 1135 f->flags |= IXL_FILTER_DEL; 1136 ixl_del_hw_filters(vsi, 1); 1137 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0) 1138 vsi->num_macs--; 1139 1140 /* Check if this is the last vlan removal */ 1141 if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) { 1142 /* Switch back to a non-vlan filter */ 1143 ixl_del_filter(vsi, macaddr, 0); 1144 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); 1145 } 1146 return; 1147 } 1148 1149 /* 1150 ** Find the filter with both matching mac addr and vlan id 1151 */ 1152 struct ixl_mac_filter * 1153 ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 1154 { 1155 struct ixl_mac_filter *f; 1156 1157 SLIST_FOREACH(f, &vsi->ftl, next) { 1158 if ((cmp_etheraddr(f->macaddr, macaddr) != 0) 1159 && (f->vlan == vlan)) { 1160 return (f); 1161 } 1162 } 1163 1164 return (NULL); 1165 } 1166 1167 /* 1168 ** This routine takes additions to the vsi filter 1169 ** table and 
/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter *f;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf,
	    "ixl_add_hw_filters: flags: %d cnt: %d\n", flags, cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & flags) == flags) {
			b = &a[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			if (f->vlan == IXL_VLAN_ANY) {
				b->vlan_tag = 0;
				b->flags = CPU_TO_LE16(
				    I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
			} else {
				b->vlan_tag = CPU_TO_LE16(f->vlan);
				b->flags = 0;
			}
			b->flags |= CPU_TO_LE16(
			    I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
			f->flags &= ~IXL_FILTER_ADD;
			j++;

			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
			    MAC_FORMAT_ARGS(f->macaddr));
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		if (status)
			device_printf(dev, "i40e_aq_add_macvlan status %s, "
			    "error %s\n", i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
		else
			vsi->num_hw_filters += j;
	}
	free(a, M_DEVBUF);
	return;
}
/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	struct ixl_mac_filter *f, *f_temp;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			e = &d[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			if (f->vlan == IXL_VLAN_ANY) {
				e->vlan_tag = 0;
				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				e->vlan_tag = f->vlan;
			}

			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
			    MAC_FORMAT_ARGS(f->macaddr));

			/* delete entry from vsi list */
			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
			free(f, M_DEVBUF);
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
		if (status) {
			int sc = 0;
			for (int i = 0; i < j; i++)
				sc += (!d[i].error_code);
			vsi->num_hw_filters -= sc;
			device_printf(dev,
			    "Failed to remove %d/%d filters, error %s\n",
			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
		} else
			vsi->num_hw_filters -= j;
	}
	free(d, M_DEVBUF);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
	return;
}

int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}
int
ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
	    I40E_QRX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "RX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
	i40e_usec_delay(500);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "TX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "RX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}
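
/*
 * Malicious Driver Detection (MDD) handlers.  The hardware latches the
 * offending function and queue in the GL_MDET_TX/RX registers; the two
 * helpers below decode those fields, clear the latches, and report the
 * event, attributing it to the PF and/or a VF where possible.
 */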
static void
ixl_handle_tx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 vf_num, queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	reg = rd32(hw, I40E_PF_MDET_TX);
	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
		    event, queue, pf_num, vf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "TX Malicious Driver Detection event (unknown)\n");
}
static void
ixl_handle_rx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/*
	 * GL_MDET_RX doesn't contain VF number information, unlike
	 * GL_MDET_TX.
	 */
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
		    I40E_GL_MDET_RX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
		    I40E_GL_MDET_RX_QUEUE_SHIFT;
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	reg = rd32(hw, I40E_PF_MDET_RX);
	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d, (VF-%d)\n",
		    event, queue, pf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "RX Malicious Driver Detection event (unknown)\n");
}
/**
 * ixl_handle_mdd_event
 *
 * Called from interrupt handler to identify possibly malicious vfs
 * (But also detects events from the PF, as well)
 **/
void
ixl_handle_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/*
	 * Handle both TX/RX because it's possible they could
	 * both trigger in the same interrupt.
	 */
	ixl_handle_tx_mdd_event(pf);
	ixl_handle_rx_mdd_event(pf);

	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);

	/* re-enable mdd interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	ixl_flush(hw);
}

void
ixl_enable_intr0(struct i40e_hw *hw)
{
	u32 reg;

	/* Use IXL_ITR_NONE so ITR isn't updated here */
	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
}

void
ixl_disable_intr0(struct i40e_hw *hw)
{
	u32 reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
	ixl_flush(hw);
}

void
ixl_enable_queue(struct i40e_hw *hw, int id)
{
	u32 reg;

	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

void
ixl_disable_queue(struct i40e_hw *hw, int id)
{
	u32 reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}
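
/*
 * Note on the queue helpers above: I40E_PFINT_DYN_CTLN() is indexed by
 * interrupt cause, not by queue number.  Since MSI-X vector 0 is
 * reserved for the admin queue (see ixl_configure_intr0_msix()), the
 * expected usage is roughly ixl_enable_queue(hw, que->msix - 1); the
 * exact mapping is established where vectors are assigned, outside
 * this file.
 */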
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);

	ixl_prepare_for_reset(pf, is_up);
	/*
	 * i40e_pf_reset checks the type of reset and acts
	 * accordingly. If EMP or Core reset was performed
	 * doing PF reset is not necessary and it sometimes
	 * fails.
	 */
	ixl_pf_reset(pf);

	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		device_printf(pf->dev,
		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		pf->link_up = FALSE;
		ixl_update_link_status(pf);
	}

	ixl_rebuild_hw_structs_after_reset(pf, is_up);

	atomic_clear_32(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
}
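
/*
 * The port counters read below are free-running 32- or 48-bit hardware
 * counters.  The ixl_stat_update32/48() helpers accumulate deltas
 * relative to the saved offsets, so the first pass after load (while
 * stat_offsets_loaded is false) only records the baseline values.
 */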
pf->stat_offsets_loaded, 1821 &osd->link_xoff_tx, &nsd->link_xoff_tx); 1822 1823 /* 1824 * For watchdog management we need to know if we have been paused 1825 * during the last interval, so capture that here. 1826 */ 1827 if (pf->stats.link_xoff_rx != prev_link_xoff_rx) 1828 vsi->shared->isc_pause_frames = 1; 1829 1830 /* Packet size stats rx */ 1831 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 1832 I40E_GLPRT_PRC64L(hw->port), 1833 pf->stat_offsets_loaded, 1834 &osd->rx_size_64, &nsd->rx_size_64); 1835 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), 1836 I40E_GLPRT_PRC127L(hw->port), 1837 pf->stat_offsets_loaded, 1838 &osd->rx_size_127, &nsd->rx_size_127); 1839 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), 1840 I40E_GLPRT_PRC255L(hw->port), 1841 pf->stat_offsets_loaded, 1842 &osd->rx_size_255, &nsd->rx_size_255); 1843 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), 1844 I40E_GLPRT_PRC511L(hw->port), 1845 pf->stat_offsets_loaded, 1846 &osd->rx_size_511, &nsd->rx_size_511); 1847 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), 1848 I40E_GLPRT_PRC1023L(hw->port), 1849 pf->stat_offsets_loaded, 1850 &osd->rx_size_1023, &nsd->rx_size_1023); 1851 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), 1852 I40E_GLPRT_PRC1522L(hw->port), 1853 pf->stat_offsets_loaded, 1854 &osd->rx_size_1522, &nsd->rx_size_1522); 1855 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), 1856 I40E_GLPRT_PRC9522L(hw->port), 1857 pf->stat_offsets_loaded, 1858 &osd->rx_size_big, &nsd->rx_size_big); 1859 1860 /* Packet size stats tx */ 1861 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), 1862 I40E_GLPRT_PTC64L(hw->port), 1863 pf->stat_offsets_loaded, 1864 &osd->tx_size_64, &nsd->tx_size_64); 1865 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), 1866 I40E_GLPRT_PTC127L(hw->port), 1867 pf->stat_offsets_loaded, 1868 &osd->tx_size_127, &nsd->tx_size_127); 1869 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), 1870 I40E_GLPRT_PTC255L(hw->port), 1871 pf->stat_offsets_loaded, 1872 &osd->tx_size_255, &nsd->tx_size_255); 1873 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), 1874 I40E_GLPRT_PTC511L(hw->port), 1875 pf->stat_offsets_loaded, 1876 &osd->tx_size_511, &nsd->tx_size_511); 1877 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), 1878 I40E_GLPRT_PTC1023L(hw->port), 1879 pf->stat_offsets_loaded, 1880 &osd->tx_size_1023, &nsd->tx_size_1023); 1881 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), 1882 I40E_GLPRT_PTC1522L(hw->port), 1883 pf->stat_offsets_loaded, 1884 &osd->tx_size_1522, &nsd->tx_size_1522); 1885 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), 1886 I40E_GLPRT_PTC9522L(hw->port), 1887 pf->stat_offsets_loaded, 1888 &osd->tx_size_big, &nsd->tx_size_big); 1889 1890 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), 1891 pf->stat_offsets_loaded, 1892 &osd->rx_undersize, &nsd->rx_undersize); 1893 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), 1894 pf->stat_offsets_loaded, 1895 &osd->rx_fragments, &nsd->rx_fragments); 1896 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), 1897 pf->stat_offsets_loaded, 1898 &osd->rx_oversize, &nsd->rx_oversize); 1899 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 1900 pf->stat_offsets_loaded, 1901 &osd->rx_jabber, &nsd->rx_jabber); 1902 /* EEE */ 1903 i40e_get_phy_lpi_status(hw, nsd); 1904 1905 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded, 1906 &osd->tx_lpi_count, &nsd->tx_lpi_count, 1907 &osd->rx_lpi_count, &nsd->rx_lpi_count); 1908 1909 pf->stat_offsets_loaded = true; 1910 /* End hw stats */ 1911 1912 /* Update vsi stats */ 1913 
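/*
 * Note on the counters above and below: ixl_stat_update32/48 subtract a
 * baseline captured on the first read after a reset.  Worked example
 * (values hypothetical): if the first read of a counter returns 0x1000,
 * that value becomes the offset, and a later raw read of 0x1800 is
 * reported as 0x800; a raw value below the offset means the register
 * wrapped, so 2^32 (or 2^48) is added before subtracting.
 */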
ixl_update_vsi_stats(vsi); 1914 1915 for (int i = 0; i < pf->num_vfs; i++) { 1916 vf = &pf->vfs[i]; 1917 if (vf->vf_flags & VF_FLAG_ENABLED) 1918 ixl_update_eth_stats(&pf->vfs[i].vsi); 1919 } 1920 } 1921 1922 /** 1923 * Update VSI-specific ethernet statistics counters. 1924 **/ 1925 void 1926 ixl_update_eth_stats(struct ixl_vsi *vsi) 1927 { 1928 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 1929 struct i40e_hw *hw = &pf->hw; 1930 struct i40e_eth_stats *es; 1931 struct i40e_eth_stats *oes; 1932 u16 stat_idx = vsi->info.stat_counter_idx; 1933 1934 es = &vsi->eth_stats; 1935 oes = &vsi->eth_stats_offsets; 1936 1937 /* Gather up the stats that the hw collects */ 1938 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), 1939 vsi->stat_offsets_loaded, 1940 &oes->tx_errors, &es->tx_errors); 1941 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 1942 vsi->stat_offsets_loaded, 1943 &oes->rx_discards, &es->rx_discards); 1944 1945 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), 1946 I40E_GLV_GORCL(stat_idx), 1947 vsi->stat_offsets_loaded, 1948 &oes->rx_bytes, &es->rx_bytes); 1949 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), 1950 I40E_GLV_UPRCL(stat_idx), 1951 vsi->stat_offsets_loaded, 1952 &oes->rx_unicast, &es->rx_unicast); 1953 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), 1954 I40E_GLV_MPRCL(stat_idx), 1955 vsi->stat_offsets_loaded, 1956 &oes->rx_multicast, &es->rx_multicast); 1957 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), 1958 I40E_GLV_BPRCL(stat_idx), 1959 vsi->stat_offsets_loaded, 1960 &oes->rx_broadcast, &es->rx_broadcast); 1961 1962 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), 1963 I40E_GLV_GOTCL(stat_idx), 1964 vsi->stat_offsets_loaded, 1965 &oes->tx_bytes, &es->tx_bytes); 1966 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), 1967 I40E_GLV_UPTCL(stat_idx), 1968 vsi->stat_offsets_loaded, 1969 &oes->tx_unicast, &es->tx_unicast); 1970 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), 1971 I40E_GLV_MPTCL(stat_idx), 1972 vsi->stat_offsets_loaded, 1973 &oes->tx_multicast, &es->tx_multicast); 1974 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), 1975 I40E_GLV_BPTCL(stat_idx), 1976 vsi->stat_offsets_loaded, 1977 &oes->tx_broadcast, &es->tx_broadcast); 1978 vsi->stat_offsets_loaded = true; 1979 } 1980 1981 void 1982 ixl_update_vsi_stats(struct ixl_vsi *vsi) 1983 { 1984 struct ixl_pf *pf; 1985 struct ifnet *ifp; 1986 struct i40e_eth_stats *es; 1987 u64 tx_discards; 1988 1989 struct i40e_hw_port_stats *nsd; 1990 1991 pf = vsi->back; 1992 ifp = vsi->ifp; 1993 es = &vsi->eth_stats; 1994 nsd = &pf->stats; 1995 1996 ixl_update_eth_stats(vsi); 1997 1998 tx_discards = es->tx_discards + nsd->tx_dropped_link_down; 1999 2000 /* Update ifnet stats */ 2001 IXL_SET_IPACKETS(vsi, es->rx_unicast + 2002 es->rx_multicast + 2003 es->rx_broadcast); 2004 IXL_SET_OPACKETS(vsi, es->tx_unicast + 2005 es->tx_multicast + 2006 es->tx_broadcast); 2007 IXL_SET_IBYTES(vsi, es->rx_bytes); 2008 IXL_SET_OBYTES(vsi, es->tx_bytes); 2009 IXL_SET_IMCASTS(vsi, es->rx_multicast); 2010 IXL_SET_OMCASTS(vsi, es->tx_multicast); 2011 2012 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + 2013 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments + 2014 nsd->rx_jabber); 2015 IXL_SET_OERRORS(vsi, es->tx_errors); 2016 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); 2017 IXL_SET_OQDROPS(vsi, tx_discards); 2018 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); 2019 IXL_SET_COLLISIONS(vsi, 0); 2020 } 2021 2022 /** 2023 * Reset all of the stats for the given pf 2024 **/ 2025 void 2026 ixl_pf_reset_stats(struct ixl_pf *pf) 
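/* Clearing the offsets along with the counters makes ixl_stat_update32/48
 * capture fresh baselines on the next stats poll. */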
2027 { 2028 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); 2029 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); 2030 pf->stat_offsets_loaded = false; 2031 } 2032 2033 /** 2034 * Resets all stats of the given vsi 2035 **/ 2036 void 2037 ixl_vsi_reset_stats(struct ixl_vsi *vsi) 2038 { 2039 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); 2040 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); 2041 vsi->stat_offsets_loaded = false; 2042 } 2043 2044 /** 2045 * Read and update a 48 bit stat from the hw 2046 * 2047 * Since the device stats are not reset at PFReset, they likely will not 2048 * be zeroed when the driver starts. We'll save the first values read 2049 * and use them as offsets to be subtracted from the raw values in order 2050 * to report stats that count from zero. 2051 **/ 2052 void 2053 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, 2054 bool offset_loaded, u64 *offset, u64 *stat) 2055 { 2056 u64 new_data; 2057 2058 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) 2059 new_data = rd64(hw, loreg); 2060 #else 2061 /* 2062 * Use two rd32's instead of one rd64; FreeBSD versions before 2063 * 10 don't support 64-bit bus reads/writes. 2064 */ 2065 new_data = rd32(hw, loreg); 2066 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; 2067 #endif 2068 2069 if (!offset_loaded) 2070 *offset = new_data; 2071 if (new_data >= *offset) 2072 *stat = new_data - *offset; 2073 else 2074 *stat = (new_data + ((u64)1 << 48)) - *offset; 2075 *stat &= 0xFFFFFFFFFFFFULL; 2076 } 2077 2078 /** 2079 * Read and update a 32 bit stat from the hw 2080 **/ 2081 void 2082 ixl_stat_update32(struct i40e_hw *hw, u32 reg, 2083 bool offset_loaded, u64 *offset, u64 *stat) 2084 { 2085 u32 new_data; 2086 2087 new_data = rd32(hw, reg); 2088 if (!offset_loaded) 2089 *offset = new_data; 2090 if (new_data >= *offset) 2091 *stat = (u32)(new_data - *offset); 2092 else 2093 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); 2094 } 2095 2096 /** 2097 * Add subset of device sysctls safe to use in recovery mode 2098 */ 2099 void 2100 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf) 2101 { 2102 device_t dev = pf->dev; 2103 2104 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2105 struct sysctl_oid_list *ctx_list = 2106 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2107 2108 struct sysctl_oid *debug_node; 2109 struct sysctl_oid_list *debug_list; 2110 2111 SYSCTL_ADD_PROC(ctx, ctx_list, 2112 OID_AUTO, "fw_version", 2113 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2114 ixl_sysctl_show_fw, "A", "Firmware version"); 2115 2116 /* Add sysctls meant to print debug information, but don't list them 2117 * in "sysctl -a" output. 
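 * (CTLFLAG_SKIP only hides these entries from tree walks such as
 * "sysctl -a"; they can still be read by exact name, e.g.
 * "sysctl dev.ixl.0.debug.dump_debug_data" -- unit number hypothetical.)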
*/ 2118 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2119 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2120 "Debug Sysctls"); 2121 debug_list = SYSCTL_CHILDREN(debug_node); 2122 2123 SYSCTL_ADD_UINT(ctx, debug_list, 2124 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2125 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2126 2127 SYSCTL_ADD_UINT(ctx, debug_list, 2128 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2129 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2130 2131 SYSCTL_ADD_PROC(ctx, debug_list, 2132 OID_AUTO, "dump_debug_data", 2133 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2134 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2135 2136 SYSCTL_ADD_PROC(ctx, debug_list, 2137 OID_AUTO, "do_pf_reset", 2138 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2139 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2140 2141 SYSCTL_ADD_PROC(ctx, debug_list, 2142 OID_AUTO, "do_core_reset", 2143 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2144 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2145 2146 SYSCTL_ADD_PROC(ctx, debug_list, 2147 OID_AUTO, "do_global_reset", 2148 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2149 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2150 2151 SYSCTL_ADD_PROC(ctx, debug_list, 2152 OID_AUTO, "queue_interrupt_table", 2153 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2154 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2155 } 2156 2157 void 2158 ixl_add_device_sysctls(struct ixl_pf *pf) 2159 { 2160 device_t dev = pf->dev; 2161 struct i40e_hw *hw = &pf->hw; 2162 2163 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2164 struct sysctl_oid_list *ctx_list = 2165 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2166 2167 struct sysctl_oid *debug_node; 2168 struct sysctl_oid_list *debug_list; 2169 2170 struct sysctl_oid *fec_node; 2171 struct sysctl_oid_list *fec_list; 2172 struct sysctl_oid *eee_node; 2173 struct sysctl_oid_list *eee_list; 2174 2175 /* Set up sysctls */ 2176 SYSCTL_ADD_PROC(ctx, ctx_list, 2177 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2178 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 2179 2180 SYSCTL_ADD_PROC(ctx, ctx_list, 2181 OID_AUTO, "advertise_speed", 2182 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2183 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); 2184 2185 SYSCTL_ADD_PROC(ctx, ctx_list, 2186 OID_AUTO, "supported_speeds", 2187 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2188 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); 2189 2190 SYSCTL_ADD_PROC(ctx, ctx_list, 2191 OID_AUTO, "current_speed", 2192 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2193 ixl_sysctl_current_speed, "A", "Current Port Speed"); 2194 2195 SYSCTL_ADD_PROC(ctx, ctx_list, 2196 OID_AUTO, "fw_version", 2197 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2198 ixl_sysctl_show_fw, "A", "Firmware version"); 2199 2200 SYSCTL_ADD_PROC(ctx, ctx_list, 2201 OID_AUTO, "unallocated_queues", 2202 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2203 ixl_sysctl_unallocated_queues, "I", 2204 "Queues not allocated to a PF or VF"); 2205 2206 SYSCTL_ADD_PROC(ctx, ctx_list, 2207 OID_AUTO, "tx_itr", 2208 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2209 ixl_sysctl_pf_tx_itr, "I", 2210 "Immediately set TX ITR value for all queues"); 2211 2212 SYSCTL_ADD_PROC(ctx, ctx_list, 2213 OID_AUTO, 
"rx_itr", 2214 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2215 ixl_sysctl_pf_rx_itr, "I", 2216 "Immediately set RX ITR value for all queues"); 2217 2218 SYSCTL_ADD_INT(ctx, ctx_list, 2219 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 2220 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 2221 2222 SYSCTL_ADD_INT(ctx, ctx_list, 2223 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 2224 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 2225 2226 /* Add FEC sysctls for 25G adapters */ 2227 if (i40e_is_25G_device(hw->device_id)) { 2228 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2229 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2230 "FEC Sysctls"); 2231 fec_list = SYSCTL_CHILDREN(fec_node); 2232 2233 SYSCTL_ADD_PROC(ctx, fec_list, 2234 OID_AUTO, "fc_ability", 2235 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2236 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 2237 2238 SYSCTL_ADD_PROC(ctx, fec_list, 2239 OID_AUTO, "rs_ability", 2240 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2241 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 2242 2243 SYSCTL_ADD_PROC(ctx, fec_list, 2244 OID_AUTO, "fc_requested", 2245 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2246 ixl_sysctl_fec_fc_request, "I", 2247 "FC FEC mode requested on link"); 2248 2249 SYSCTL_ADD_PROC(ctx, fec_list, 2250 OID_AUTO, "rs_requested", 2251 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2252 ixl_sysctl_fec_rs_request, "I", 2253 "RS FEC mode requested on link"); 2254 2255 SYSCTL_ADD_PROC(ctx, fec_list, 2256 OID_AUTO, "auto_fec_enabled", 2257 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2258 ixl_sysctl_fec_auto_enable, "I", 2259 "Let FW decide FEC ability/request modes"); 2260 } 2261 2262 SYSCTL_ADD_PROC(ctx, ctx_list, 2263 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2264 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 2265 2266 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2267 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2268 "Energy Efficient Ethernet (EEE) Sysctls"); 2269 eee_list = SYSCTL_CHILDREN(eee_node); 2270 2271 SYSCTL_ADD_PROC(ctx, eee_list, 2272 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2273 pf, 0, ixl_sysctl_eee_enable, "I", 2274 "Enable Energy Efficient Ethernet (EEE)"); 2275 2276 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", 2277 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0, 2278 "TX LPI status"); 2279 2280 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", 2281 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0, 2282 "RX LPI status"); 2283 2284 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", 2285 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count, 2286 "TX LPI count"); 2287 2288 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", 2289 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count, 2290 "RX LPI count"); 2291 2292 /* Add sysctls meant to print debug information, but don't list them 2293 * in "sysctl -a" output. 
*/ 2294 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2295 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2296 "Debug Sysctls"); 2297 debug_list = SYSCTL_CHILDREN(debug_node); 2298 2299 SYSCTL_ADD_UINT(ctx, debug_list, 2300 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2301 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2302 2303 SYSCTL_ADD_UINT(ctx, debug_list, 2304 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2305 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2306 2307 SYSCTL_ADD_PROC(ctx, debug_list, 2308 OID_AUTO, "link_status", 2309 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2310 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); 2311 2312 SYSCTL_ADD_PROC(ctx, debug_list, 2313 OID_AUTO, "phy_abilities", 2314 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2315 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); 2316 2317 SYSCTL_ADD_PROC(ctx, debug_list, 2318 OID_AUTO, "filter_list", 2319 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2320 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); 2321 2322 SYSCTL_ADD_PROC(ctx, debug_list, 2323 OID_AUTO, "hw_res_alloc", 2324 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2325 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); 2326 2327 SYSCTL_ADD_PROC(ctx, debug_list, 2328 OID_AUTO, "switch_config", 2329 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2330 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); 2331 2332 SYSCTL_ADD_PROC(ctx, debug_list, 2333 OID_AUTO, "rss_key", 2334 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2335 pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); 2336 2337 SYSCTL_ADD_PROC(ctx, debug_list, 2338 OID_AUTO, "rss_lut", 2339 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2340 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); 2341 2342 SYSCTL_ADD_PROC(ctx, debug_list, 2343 OID_AUTO, "rss_hena", 2344 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2345 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); 2346 2347 SYSCTL_ADD_PROC(ctx, debug_list, 2348 OID_AUTO, "disable_fw_link_management", 2349 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2350 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); 2351 2352 SYSCTL_ADD_PROC(ctx, debug_list, 2353 OID_AUTO, "dump_debug_data", 2354 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2355 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2356 2357 SYSCTL_ADD_PROC(ctx, debug_list, 2358 OID_AUTO, "do_pf_reset", 2359 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2360 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2361 2362 SYSCTL_ADD_PROC(ctx, debug_list, 2363 OID_AUTO, "do_core_reset", 2364 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2365 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2366 2367 SYSCTL_ADD_PROC(ctx, debug_list, 2368 OID_AUTO, "do_global_reset", 2369 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2370 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2371 2372 SYSCTL_ADD_PROC(ctx, debug_list, 2373 OID_AUTO, "queue_interrupt_table", 2374 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2375 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2376 2377 if (pf->has_i2c) { 2378 SYSCTL_ADD_PROC(ctx, debug_list, 2379 OID_AUTO, "read_i2c_byte", 2380 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2381 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); 2382 2383 SYSCTL_ADD_PROC(ctx, 
debug_list, 2384 OID_AUTO, "write_i2c_byte", 2385 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2386 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); 2387 2388 SYSCTL_ADD_PROC(ctx, debug_list, 2389 OID_AUTO, "read_i2c_diag_data", 2390 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2391 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); 2392 } 2393 } 2394 2395 /* 2396 * Primarily for finding out, at runtime, how many queues can be 2397 * assigned to VFs. 2398 */ 2399 static int 2400 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) 2401 { 2402 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2403 int queues; 2404 2405 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); 2406 2407 return sysctl_handle_int(oidp, NULL, queues, req); 2408 } 2409 2410 static const char * 2411 ixl_link_speed_string(enum i40e_aq_link_speed link_speed) 2412 { 2413 const char * link_speed_str[] = { 2414 "Unknown", 2415 "100 Mbps", 2416 "1 Gbps", 2417 "10 Gbps", 2418 "40 Gbps", 2419 "20 Gbps", 2420 "25 Gbps", 2421 "2.5 Gbps", 2422 "5 Gbps" 2423 }; 2424 int index; 2425 2426 switch (link_speed) { 2427 case I40E_LINK_SPEED_100MB: 2428 index = 1; 2429 break; 2430 case I40E_LINK_SPEED_1GB: 2431 index = 2; 2432 break; 2433 case I40E_LINK_SPEED_10GB: 2434 index = 3; 2435 break; 2436 case I40E_LINK_SPEED_40GB: 2437 index = 4; 2438 break; 2439 case I40E_LINK_SPEED_20GB: 2440 index = 5; 2441 break; 2442 case I40E_LINK_SPEED_25GB: 2443 index = 6; 2444 break; 2445 case I40E_LINK_SPEED_2_5GB: 2446 index = 7; 2447 break; 2448 case I40E_LINK_SPEED_5GB: 2449 index = 8; 2450 break; 2451 case I40E_LINK_SPEED_UNKNOWN: 2452 default: 2453 index = 0; 2454 break; 2455 } 2456 2457 return (link_speed_str[index]); 2458 } 2459 2460 int 2461 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2462 { 2463 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2464 struct i40e_hw *hw = &pf->hw; 2465 int error = 0; 2466 2467 ixl_update_link_status(pf); 2468 2469 error = sysctl_handle_string(oidp, 2470 __DECONST(void *, 2471 ixl_link_speed_string(hw->phy.link_info.link_speed)), 2472 8, req); 2473 2474 return (error); 2475 } 2476 2477 /* 2478 * Converts an 8-bit speed bitmap to and from sysctl flags and 2479 * Admin Queue flags. 2480 */ 2481 static u8 2482 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) 2483 { 2484 #define SPEED_MAP_SIZE 8 2485 static u16 speedmap[SPEED_MAP_SIZE] = { 2486 (I40E_LINK_SPEED_100MB | (0x1 << 8)), 2487 (I40E_LINK_SPEED_1GB | (0x2 << 8)), 2488 (I40E_LINK_SPEED_10GB | (0x4 << 8)), 2489 (I40E_LINK_SPEED_20GB | (0x8 << 8)), 2490 (I40E_LINK_SPEED_25GB | (0x10 << 8)), 2491 (I40E_LINK_SPEED_40GB | (0x20 << 8)), 2492 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)), 2493 (I40E_LINK_SPEED_5GB | (0x80 << 8)), 2494 }; 2495 u8 retval = 0; 2496 2497 for (int i = 0; i < SPEED_MAP_SIZE; i++) { 2498 if (to_aq) 2499 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; 2500 else 2501 retval |= (speeds & speedmap[i]) ? 
(speedmap[i] >> 8) : 0; 2502 } 2503 2504 return (retval); 2505 } 2506 2507 int 2508 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) 2509 { 2510 struct i40e_hw *hw = &pf->hw; 2511 device_t dev = pf->dev; 2512 struct i40e_aq_get_phy_abilities_resp abilities; 2513 struct i40e_aq_set_phy_config config; 2514 enum i40e_status_code aq_error = 0; 2515 2516 /* Get current capability information */ 2517 aq_error = i40e_aq_get_phy_capabilities(hw, 2518 FALSE, FALSE, &abilities, NULL); 2519 if (aq_error) { 2520 device_printf(dev, 2521 "%s: Error getting phy capabilities %d," 2522 " aq error: %d\n", __func__, aq_error, 2523 hw->aq.asq_last_status); 2524 return (EIO); 2525 } 2526 2527 /* Prepare new config */ 2528 bzero(&config, sizeof(config)); 2529 if (from_aq) 2530 config.link_speed = speeds; 2531 else 2532 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); 2533 config.phy_type = abilities.phy_type; 2534 config.phy_type_ext = abilities.phy_type_ext; 2535 config.abilities = abilities.abilities 2536 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 2537 config.eee_capability = abilities.eee_capability; 2538 config.eeer = abilities.eeer_val; 2539 config.low_power_ctrl = abilities.d3_lpan; 2540 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 2541 & I40E_AQ_PHY_FEC_CONFIG_MASK; 2542 2543 /* Do aq command & restart link */ 2544 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 2545 if (aq_error) { 2546 device_printf(dev, 2547 "%s: Error setting new phy config %d," 2548 " aq error: %d\n", __func__, aq_error, 2549 hw->aq.asq_last_status); 2550 return (EIO); 2551 } 2552 2553 return (0); 2554 } 2555 2556 /* 2557 ** Supported link speeds 2558 ** Flags: 2559 ** 0x1 - 100 Mb 2560 ** 0x2 - 1G 2561 ** 0x4 - 10G 2562 ** 0x8 - 20G 2563 ** 0x10 - 25G 2564 ** 0x20 - 40G 2565 ** 0x40 - 2.5G 2566 ** 0x80 - 5G 2567 */ 2568 static int 2569 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) 2570 { 2571 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2572 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 2573 2574 return sysctl_handle_int(oidp, NULL, supported, req); 2575 } 2576 2577 /* 2578 ** Control link advertise speed: 2579 ** Flags: 2580 ** 0x1 - advertise 100 Mb 2581 ** 0x2 - advertise 1G 2582 ** 0x4 - advertise 10G 2583 ** 0x8 - advertise 20G 2584 ** 0x10 - advertise 25G 2585 ** 0x20 - advertise 40G 2586 ** 0x40 - advertise 2.5G 2587 ** 0x80 - advertise 5G 2588 ** 2589 ** Set to 0 to disable link 2590 */ 2591 int 2592 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) 2593 { 2594 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2595 device_t dev = pf->dev; 2596 u8 converted_speeds; 2597 int requested_ls = 0; 2598 int error = 0; 2599 2600 /* Read in new mode */ 2601 requested_ls = pf->advertised_speed; 2602 error = sysctl_handle_int(oidp, &requested_ls, 0, req); 2603 if ((error) || (req->newptr == NULL)) 2604 return (error); 2605 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 2606 device_printf(dev, "Interface is currently in FW recovery mode. 
" 2607 "Setting advertise speed not supported\n"); 2608 return (EINVAL); 2609 } 2610 2611 /* Error out if bits outside of possible flag range are set */ 2612 if ((requested_ls & ~((u8)0xFF)) != 0) { 2613 device_printf(dev, "Input advertised speed out of range; " 2614 "valid flags are: 0x%02x\n", 2615 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2616 return (EINVAL); 2617 } 2618 2619 /* Check if adapter supports input value */ 2620 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); 2621 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { 2622 device_printf(dev, "Invalid advertised speed; " 2623 "valid flags are: 0x%02x\n", 2624 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2625 return (EINVAL); 2626 } 2627 2628 error = ixl_set_advertised_speeds(pf, requested_ls, false); 2629 if (error) 2630 return (error); 2631 2632 pf->advertised_speed = requested_ls; 2633 ixl_update_link_status(pf); 2634 return (0); 2635 } 2636 2637 /* 2638 * Input: bitmap of enum i40e_aq_link_speed 2639 */ 2640 u64 2641 ixl_max_aq_speed_to_value(u8 link_speeds) 2642 { 2643 if (link_speeds & I40E_LINK_SPEED_40GB) 2644 return IF_Gbps(40); 2645 if (link_speeds & I40E_LINK_SPEED_25GB) 2646 return IF_Gbps(25); 2647 if (link_speeds & I40E_LINK_SPEED_20GB) 2648 return IF_Gbps(20); 2649 if (link_speeds & I40E_LINK_SPEED_10GB) 2650 return IF_Gbps(10); 2651 if (link_speeds & I40E_LINK_SPEED_5GB) 2652 return IF_Gbps(5); 2653 if (link_speeds & I40E_LINK_SPEED_2_5GB) 2654 return IF_Mbps(2500); 2655 if (link_speeds & I40E_LINK_SPEED_1GB) 2656 return IF_Gbps(1); 2657 if (link_speeds & I40E_LINK_SPEED_100MB) 2658 return IF_Mbps(100); 2659 else 2660 /* Minimum supported link speed */ 2661 return IF_Mbps(100); 2662 } 2663 2664 /* 2665 ** Get the width and transaction speed of 2666 ** the bus this adapter is plugged into. 2667 */ 2668 void 2669 ixl_get_bus_info(struct ixl_pf *pf) 2670 { 2671 struct i40e_hw *hw = &pf->hw; 2672 device_t dev = pf->dev; 2673 u16 link; 2674 u32 offset, num_ports; 2675 u64 max_speed; 2676 2677 /* Some devices don't use PCIE */ 2678 if (hw->mac.type == I40E_MAC_X722) 2679 return; 2680 2681 /* Read PCI Express Capabilities Link Status Register */ 2682 pci_find_cap(dev, PCIY_EXPRESS, &offset); 2683 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2684 2685 /* Fill out hw struct with PCIE info */ 2686 i40e_set_pci_config_data(hw, link); 2687 2688 /* Use info to print out bandwidth messages */ 2689 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 2690 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 2691 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 2692 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 2693 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 2694 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 2695 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 2696 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 2697 ("Unknown")); 2698 2699 /* 2700 * If adapter is in slot with maximum supported speed, 2701 * no warning message needs to be printed out. 
2702 */ 2703 if (hw->bus.speed >= i40e_bus_speed_8000 2704 && hw->bus.width >= i40e_bus_width_pcie_x8) 2705 return; 2706 2707 num_ports = bitcount32(hw->func_caps.valid_functions); 2708 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; 2709 2710 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { 2711 device_printf(dev, "PCI-Express bandwidth available" 2712 " for this device may be insufficient for" 2713 " optimal performance.\n"); 2714 device_printf(dev, "Please move the device to a different" 2715 " PCI-e link with more lanes and/or higher" 2716 " transfer rate.\n"); 2717 } 2718 } 2719 2720 static int 2721 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2722 { 2723 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2724 struct i40e_hw *hw = &pf->hw; 2725 struct sbuf *sbuf; 2726 2727 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2728 ixl_nvm_version_str(hw, sbuf); 2729 sbuf_finish(sbuf); 2730 sbuf_delete(sbuf); 2731 2732 return (0); 2733 } 2734 2735 void 2736 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) 2737 { 2738 u8 nvma_ptr = nvma->config & 0xFF; 2739 u8 nvma_flags = (nvma->config & 0xF00) >> 8; 2740 const char * cmd_str; 2741 2742 switch (nvma->command) { 2743 case I40E_NVM_READ: 2744 if (nvma_ptr == 0xF && nvma_flags == 0xF && 2745 nvma->offset == 0 && nvma->data_size == 1) { 2746 device_printf(dev, "NVMUPD: Get Driver Status Command\n"); 2747 return; 2748 } 2749 cmd_str = "READ "; 2750 break; 2751 case I40E_NVM_WRITE: 2752 cmd_str = "WRITE"; 2753 break; 2754 default: 2755 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command); 2756 return; 2757 } 2758 device_printf(dev, 2759 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n", 2760 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size); 2761 } 2762 2763 int 2764 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) 2765 { 2766 struct i40e_hw *hw = &pf->hw; 2767 struct i40e_nvm_access *nvma; 2768 device_t dev = pf->dev; 2769 enum i40e_status_code status = 0; 2770 size_t nvma_size, ifd_len, exp_len; 2771 int err, perrno; 2772 2773 DEBUGFUNC("ixl_handle_nvmupd_cmd"); 2774 2775 /* Sanity checks */ 2776 nvma_size = sizeof(struct i40e_nvm_access); 2777 ifd_len = ifd->ifd_len; 2778 2779 if (ifd_len < nvma_size || 2780 ifd->ifd_data == NULL) { 2781 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", 2782 __func__); 2783 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", 2784 __func__, ifd_len, nvma_size); 2785 device_printf(dev, "%s: data pointer: %p\n", __func__, 2786 ifd->ifd_data); 2787 return (EINVAL); 2788 } 2789 2790 nvma = malloc(ifd_len, M_IXL, M_WAITOK); 2791 err = copyin(ifd->ifd_data, nvma, ifd_len); 2792 if (err) { 2793 device_printf(dev, "%s: Cannot get request from user space\n", 2794 __func__); 2795 free(nvma, M_IXL); 2796 return (err); 2797 } 2798 2799 if (pf->dbg_mask & IXL_DBG_NVMUPD) 2800 ixl_print_nvm_cmd(dev, nvma); 2801 2802 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) { 2803 int count = 0; 2804 while (count++ < 100) { 2805 i40e_msec_delay(100); 2806 if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING)) 2807 break; 2808 } 2809 } 2810 2811 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) { 2812 device_printf(dev, 2813 "%s: timeout waiting for EMP reset to finish\n", 2814 __func__); 2815 free(nvma, M_IXL); 2816 return (-EBUSY); 2817 } 2818 2819 if (nvma->data_size < 1 || nvma->data_size > 4096) { 2820 device_printf(dev, 2821 "%s: invalid request, data size not in supported range\n", 
2822 __func__); 2823 free(nvma, M_IXL); 2824 return (EINVAL); 2825 } 2826 2827 /* 2828 * Older versions of the NVM update tool don't set ifd_len to the size 2829 * of the entire buffer passed to the ioctl. Check the data_size field 2830 * in the contained i40e_nvm_access struct and ensure everything is 2831 * copied in from userspace. 2832 */ 2833 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ 2834 2835 if (ifd_len < exp_len) { 2836 ifd_len = exp_len; 2837 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK); 2838 err = copyin(ifd->ifd_data, nvma, ifd_len); 2839 if (err) { 2840 device_printf(dev, "%s: Cannot get request from user space\n", 2841 __func__); 2842 free(nvma, M_IXL); 2843 return (err); 2844 } 2845 } 2846 2847 // TODO: Might need a different lock here 2848 // IXL_PF_LOCK(pf); 2849 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); 2850 // IXL_PF_UNLOCK(pf); 2851 2852 err = copyout(nvma, ifd->ifd_data, ifd_len); 2853 free(nvma, M_IXL); 2854 if (err) { 2855 device_printf(dev, "%s: Cannot return data to user space\n", 2856 __func__); 2857 return (err); 2858 } 2859 2860 /* Let the nvmupdate report errors, show them only when debug is enabled */ 2861 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) 2862 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", 2863 i40e_stat_str(hw, status), perrno); 2864 2865 /* 2866 * -EPERM is actually ERESTART, which the kernel interprets as needing 2867 * to run this ioctl again. So use -EACCES for -EPERM instead. 2868 */ 2869 if (perrno == -EPERM) 2870 return (-EACCES); 2871 else 2872 return (perrno); 2873 } 2874 2875 int 2876 ixl_find_i2c_interface(struct ixl_pf *pf) 2877 { 2878 struct i40e_hw *hw = &pf->hw; 2879 bool i2c_en, port_matched; 2880 u32 reg; 2881 2882 for (int i = 0; i < 4; i++) { 2883 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); 2884 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); 2885 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) 2886 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) 2887 & BIT(hw->port); 2888 if (i2c_en && port_matched) 2889 return (i); 2890 } 2891 2892 return (-1); 2893 } 2894 2895 static char * 2896 ixl_phy_type_string(u32 bit_pos, bool ext) 2897 { 2898 static char * phy_types_str[32] = { 2899 "SGMII", 2900 "1000BASE-KX", 2901 "10GBASE-KX4", 2902 "10GBASE-KR", 2903 "40GBASE-KR4", 2904 "XAUI", 2905 "XFI", 2906 "SFI", 2907 "XLAUI", 2908 "XLPPI", 2909 "40GBASE-CR4", 2910 "10GBASE-CR1", 2911 "SFP+ Active DA", 2912 "QSFP+ Active DA", 2913 "Reserved (14)", 2914 "Reserved (15)", 2915 "Reserved (16)", 2916 "100BASE-TX", 2917 "1000BASE-T", 2918 "10GBASE-T", 2919 "10GBASE-SR", 2920 "10GBASE-LR", 2921 "10GBASE-SFP+Cu", 2922 "10GBASE-CR1", 2923 "40GBASE-CR4", 2924 "40GBASE-SR4", 2925 "40GBASE-LR4", 2926 "1000BASE-SX", 2927 "1000BASE-LX", 2928 "1000BASE-T Optical", 2929 "20GBASE-KR2", 2930 "Reserved (31)" 2931 }; 2932 static char * ext_phy_types_str[8] = { 2933 "25GBASE-KR", 2934 "25GBASE-CR", 2935 "25GBASE-SR", 2936 "25GBASE-LR", 2937 "25GBASE-AOC", 2938 "25GBASE-ACC", 2939 "2.5GBASE-T", 2940 "5GBASE-T" 2941 }; 2942 2943 if (ext && bit_pos > 7) return "Invalid_Ext"; 2944 if (bit_pos > 31) return "Invalid"; 2945 2946 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; 2947 } 2948 2949 /* TODO: ERJ: I don't think this is necessary anymore. 
*/ 2950 int 2951 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) 2952 { 2953 device_t dev = pf->dev; 2954 struct i40e_hw *hw = &pf->hw; 2955 struct i40e_aq_desc desc; 2956 enum i40e_status_code status; 2957 2958 struct i40e_aqc_get_link_status *aq_link_status = 2959 (struct i40e_aqc_get_link_status *)&desc.params.raw; 2960 2961 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 2962 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); 2963 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 2964 if (status) { 2965 device_printf(dev, 2966 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", 2967 __func__, i40e_stat_str(hw, status), 2968 i40e_aq_str(hw, hw->aq.asq_last_status)); 2969 return (EIO); 2970 } 2971 2972 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); 2973 return (0); 2974 } 2975 2976 static char * 2977 ixl_phy_type_string_ls(u8 val) 2978 { 2979 if (val >= 0x1F) 2980 return ixl_phy_type_string(val - 0x1F, true); 2981 else 2982 return ixl_phy_type_string(val, false); 2983 } 2984 2985 static int 2986 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) 2987 { 2988 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2989 device_t dev = pf->dev; 2990 struct sbuf *buf; 2991 int error = 0; 2992 2993 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2994 if (!buf) { 2995 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 2996 return (ENOMEM); 2997 } 2998 2999 struct i40e_aqc_get_link_status link_status; 3000 error = ixl_aq_get_link_status(pf, &link_status); 3001 if (error) { 3002 sbuf_delete(buf); 3003 return (error); 3004 } 3005 3006 sbuf_printf(buf, "\n" 3007 "PHY Type : 0x%02x<%s>\n" 3008 "Speed : 0x%02x\n" 3009 "Link info: 0x%02x\n" 3010 "AN info : 0x%02x\n" 3011 "Ext info : 0x%02x\n" 3012 "Loopback : 0x%02x\n" 3013 "Max Frame: %d\n" 3014 "Config : 0x%02x\n" 3015 "Power : 0x%02x", 3016 link_status.phy_type, 3017 ixl_phy_type_string_ls(link_status.phy_type), 3018 link_status.link_speed, 3019 link_status.link_info, 3020 link_status.an_info, 3021 link_status.ext_info, 3022 link_status.loopback, 3023 link_status.max_frame_size, 3024 link_status.config, 3025 link_status.power_desc); 3026 3027 error = sbuf_finish(buf); 3028 if (error) 3029 device_printf(dev, "Error finishing sbuf: %d\n", error); 3030 3031 sbuf_delete(buf); 3032 return (error); 3033 } 3034 3035 static int 3036 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 3037 { 3038 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3039 struct i40e_hw *hw = &pf->hw; 3040 device_t dev = pf->dev; 3041 enum i40e_status_code status; 3042 struct i40e_aq_get_phy_abilities_resp abilities; 3043 struct sbuf *buf; 3044 int error = 0; 3045 3046 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3047 if (!buf) { 3048 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3049 return (ENOMEM); 3050 } 3051 3052 status = i40e_aq_get_phy_capabilities(hw, 3053 FALSE, FALSE, &abilities, NULL); 3054 if (status) { 3055 device_printf(dev, 3056 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 3057 __func__, i40e_stat_str(hw, status), 3058 i40e_aq_str(hw, hw->aq.asq_last_status)); 3059 sbuf_delete(buf); 3060 return (EIO); 3061 } 3062 3063 sbuf_printf(buf, "\n" 3064 "PHY Type : %08x", 3065 abilities.phy_type); 3066 3067 if (abilities.phy_type != 0) { 3068 sbuf_printf(buf, "<"); 3069 for (int i = 0; i < 32; i++) 3070 if ((1 << i) & abilities.phy_type) 3071 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); 3072 sbuf_printf(buf, ">"); 
3073 } 3074 3075 sbuf_printf(buf, "\nPHY Ext : %02x", 3076 abilities.phy_type_ext); 3077 3078 if (abilities.phy_type_ext != 0) { 3079 sbuf_printf(buf, "<"); 3080 for (int i = 0; i < 4; i++) 3081 if ((1 << i) & abilities.phy_type_ext) 3082 sbuf_printf(buf, "%s,", 3083 ixl_phy_type_string(i, true)); 3084 sbuf_printf(buf, ">"); 3085 } 3086 3087 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 3088 if (abilities.link_speed != 0) { 3089 u8 link_speed; 3090 sbuf_printf(buf, " <"); 3091 for (int i = 0; i < 8; i++) { 3092 link_speed = (1 << i) & abilities.link_speed; 3093 if (link_speed) 3094 sbuf_printf(buf, "%s, ", 3095 ixl_link_speed_string(link_speed)); 3096 } 3097 sbuf_printf(buf, ">"); 3098 } 3099 3100 sbuf_printf(buf, "\n" 3101 "Abilities: %02x\n" 3102 "EEE cap : %04x\n" 3103 "EEER reg : %08x\n" 3104 "D3 Lpan : %02x\n" 3105 "ID : %02x %02x %02x %02x\n" 3106 "ModType : %02x %02x %02x\n" 3107 "ModType E: %01x\n" 3108 "FEC Cfg : %02x\n" 3109 "Ext CC : %02x", 3110 abilities.abilities, abilities.eee_capability, 3111 abilities.eeer_val, abilities.d3_lpan, 3112 abilities.phy_id[0], abilities.phy_id[1], 3113 abilities.phy_id[2], abilities.phy_id[3], 3114 abilities.module_type[0], abilities.module_type[1], 3115 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 3116 abilities.fec_cfg_curr_mod_ext_info & 0x1F, 3117 abilities.ext_comp_code); 3118 3119 error = sbuf_finish(buf); 3120 if (error) 3121 device_printf(dev, "Error finishing sbuf: %d\n", error); 3122 3123 sbuf_delete(buf); 3124 return (error); 3125 } 3126 3127 static int 3128 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 3129 { 3130 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3131 struct ixl_vsi *vsi = &pf->vsi; 3132 struct ixl_mac_filter *f; 3133 device_t dev = pf->dev; 3134 int error = 0, ftl_len = 0, ftl_counter = 0; 3135 3136 struct sbuf *buf; 3137 3138 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3139 if (!buf) { 3140 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3141 return (ENOMEM); 3142 } 3143 3144 sbuf_printf(buf, "\n"); 3145 3146 /* Print MAC filters */ 3147 sbuf_printf(buf, "PF Filters:\n"); 3148 SLIST_FOREACH(f, &vsi->ftl, next) 3149 ftl_len++; 3150 3151 if (ftl_len < 1) 3152 sbuf_printf(buf, "(none)\n"); 3153 else { 3154 SLIST_FOREACH(f, &vsi->ftl, next) { 3155 sbuf_printf(buf, 3156 MAC_FORMAT ", vlan %4d, flags %#06x", 3157 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3158 /* don't print '\n' for last entry */ 3159 if (++ftl_counter != ftl_len) 3160 sbuf_printf(buf, "\n"); 3161 } 3162 } 3163 3164 #ifdef PCI_IOV 3165 /* TODO: Give each VF its own filter list sysctl */ 3166 struct ixl_vf *vf; 3167 if (pf->num_vfs > 0) { 3168 sbuf_printf(buf, "\n\n"); 3169 for (int i = 0; i < pf->num_vfs; i++) { 3170 vf = &pf->vfs[i]; 3171 if (!(vf->vf_flags & VF_FLAG_ENABLED)) 3172 continue; 3173 3174 vsi = &vf->vsi; 3175 ftl_len = 0, ftl_counter = 0; 3176 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num); 3177 SLIST_FOREACH(f, &vsi->ftl, next) 3178 ftl_len++; 3179 3180 if (ftl_len < 1) 3181 sbuf_printf(buf, "(none)\n"); 3182 else { 3183 SLIST_FOREACH(f, &vsi->ftl, next) { 3184 sbuf_printf(buf, 3185 MAC_FORMAT ", vlan %4d, flags %#06x\n", 3186 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3187 } 3188 } 3189 } 3190 } 3191 #endif 3192 3193 error = sbuf_finish(buf); 3194 if (error) 3195 device_printf(dev, "Error finishing sbuf: %d\n", error); 3196 sbuf_delete(buf); 3197 3198 return (error); 3199 } 3200 3201 #define IXL_SW_RES_SIZE 0x14 3202 int 3203 ixl_res_alloc_cmp(const void *a, 
const void *b) 3204 { 3205 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; 3206 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; 3207 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; 3208 3209 return ((int)one->resource_type - (int)two->resource_type); 3210 } 3211 3212 /* 3213 * Longest string length: 25 3214 */ 3215 const char * 3216 ixl_switch_res_type_string(u8 type) 3217 { 3218 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = { 3219 "VEB", 3220 "VSI", 3221 "Perfect Match MAC address", 3222 "S-tag", 3223 "(Reserved)", 3224 "Multicast hash entry", 3225 "Unicast hash entry", 3226 "VLAN", 3227 "VSI List entry", 3228 "(Reserved)", 3229 "VLAN Statistic Pool", 3230 "Mirror Rule", 3231 "Queue Set", 3232 "Inner VLAN Forward filter", 3233 "(Reserved)", 3234 "Inner MAC", 3235 "IP", 3236 "GRE/VN1 Key", 3237 "VN2 Key", 3238 "Tunneling Port" 3239 }; 3240 3241 if (type < IXL_SW_RES_SIZE) 3242 return ixl_switch_res_type_strings[type]; 3243 else 3244 return "(Reserved)"; 3245 } 3246 3247 static int 3248 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) 3249 { 3250 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3251 struct i40e_hw *hw = &pf->hw; 3252 device_t dev = pf->dev; 3253 struct sbuf *buf; 3254 enum i40e_status_code status; 3255 int error = 0; 3256 3257 u8 num_entries; 3258 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; 3259 3260 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3261 if (!buf) { 3262 device_printf(dev, "Could not allocate sbuf for output.\n"); 3263 return (ENOMEM); 3264 } 3265 3266 bzero(resp, sizeof(resp)); 3267 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, 3268 resp, 3269 IXL_SW_RES_SIZE, 3270 NULL); 3271 if (status) { 3272 device_printf(dev, 3273 "%s: get_switch_resource_alloc() error %s, aq error %s\n", 3274 __func__, i40e_stat_str(hw, status), 3275 i40e_aq_str(hw, hw->aq.asq_last_status)); 3276 sbuf_delete(buf); 3277 return (error); 3278 } 3279 3280 /* Sort entries by type for display */ 3281 qsort(resp, num_entries, 3282 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), 3283 &ixl_res_alloc_cmp); 3284 3285 sbuf_cat(buf, "\n"); 3286 sbuf_printf(buf, "# of entries: %d\n", num_entries); 3287 sbuf_printf(buf, 3288 " Type | Guaranteed | Total | Used | Un-allocated\n" 3289 " | (this) | (all) | (this) | (all) \n"); 3290 for (int i = 0; i < num_entries; i++) { 3291 sbuf_printf(buf, 3292 "%25s | %10d %5d %6d %12d", 3293 ixl_switch_res_type_string(resp[i].resource_type), 3294 resp[i].guaranteed, 3295 resp[i].total, 3296 resp[i].used, 3297 resp[i].total_unalloced); 3298 if (i < num_entries - 1) 3299 sbuf_cat(buf, "\n"); 3300 } 3301 3302 error = sbuf_finish(buf); 3303 if (error) 3304 device_printf(dev, "Error finishing sbuf: %d\n", error); 3305 3306 sbuf_delete(buf); 3307 return (error); 3308 } 3309 3310 enum ixl_sw_seid_offset { 3311 IXL_SW_SEID_EMP = 1, 3312 IXL_SW_SEID_MAC_START = 2, 3313 IXL_SW_SEID_MAC_END = 5, 3314 IXL_SW_SEID_PF_START = 16, 3315 IXL_SW_SEID_PF_END = 31, 3316 IXL_SW_SEID_VF_START = 32, 3317 IXL_SW_SEID_VF_END = 159, 3318 }; 3319 3320 /* 3321 * Caller must init and delete sbuf; this function will clear and 3322 * finish it for caller. 3323 * 3324 * Note: The SEID argument only applies for elements defined by FW at 3325 * power-on; these include the EMP, Ports, PFs and VFs. 
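 * Under the IXL_SW_SEID_* ranges above, SEID 1 is the EMP, SEIDs 2-5
 * map to MAC 0-3, 16-31 to PF 0-15, and 32-159 to VF 0-127.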
3326 */ 3327 static char * 3328 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid) 3329 { 3330 sbuf_clear(s); 3331 3332 /* If SEID is in certain ranges, then we can infer the 3333 * mapping of SEID to switch element. 3334 */ 3335 if (seid == IXL_SW_SEID_EMP) { 3336 sbuf_cat(s, "EMP"); 3337 goto out; 3338 } else if (seid >= IXL_SW_SEID_MAC_START && 3339 seid <= IXL_SW_SEID_MAC_END) { 3340 sbuf_printf(s, "MAC %2d", 3341 seid - IXL_SW_SEID_MAC_START); 3342 goto out; 3343 } else if (seid >= IXL_SW_SEID_PF_START && 3344 seid <= IXL_SW_SEID_PF_END) { 3345 sbuf_printf(s, "PF %3d", 3346 seid - IXL_SW_SEID_PF_START); 3347 goto out; 3348 } else if (seid >= IXL_SW_SEID_VF_START && 3349 seid <= IXL_SW_SEID_VF_END) { 3350 sbuf_printf(s, "VF %3d", 3351 seid - IXL_SW_SEID_VF_START); 3352 goto out; 3353 } 3354 3355 switch (element_type) { 3356 case I40E_AQ_SW_ELEM_TYPE_BMC: 3357 sbuf_cat(s, "BMC"); 3358 break; 3359 case I40E_AQ_SW_ELEM_TYPE_PV: 3360 sbuf_cat(s, "PV"); 3361 break; 3362 case I40E_AQ_SW_ELEM_TYPE_VEB: 3363 sbuf_cat(s, "VEB"); 3364 break; 3365 case I40E_AQ_SW_ELEM_TYPE_PA: 3366 sbuf_cat(s, "PA"); 3367 break; 3368 case I40E_AQ_SW_ELEM_TYPE_VSI: 3369 sbuf_printf(s, "VSI"); 3370 break; 3371 default: 3372 sbuf_cat(s, "?"); 3373 break; 3374 } 3375 3376 out: 3377 sbuf_finish(s); 3378 return sbuf_data(s); 3379 } 3380 3381 static int 3382 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b) 3383 { 3384 const struct i40e_aqc_switch_config_element_resp *one, *two; 3385 one = (const struct i40e_aqc_switch_config_element_resp *)a; 3386 two = (const struct i40e_aqc_switch_config_element_resp *)b; 3387 3388 return ((int)one->seid - (int)two->seid); 3389 } 3390 3391 static int 3392 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) 3393 { 3394 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3395 struct i40e_hw *hw = &pf->hw; 3396 device_t dev = pf->dev; 3397 struct sbuf *buf; 3398 struct sbuf *nmbuf; 3399 enum i40e_status_code status; 3400 int error = 0; 3401 u16 next = 0; 3402 u8 aq_buf[I40E_AQ_LARGE_BUF]; 3403 3404 struct i40e_aqc_switch_config_element_resp *elem; 3405 struct i40e_aqc_get_switch_config_resp *sw_config; 3406 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 3407 3408 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3409 if (!buf) { 3410 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3411 return (ENOMEM); 3412 } 3413 3414 status = i40e_aq_get_switch_config(hw, sw_config, 3415 sizeof(aq_buf), &next, NULL); 3416 if (status) { 3417 device_printf(dev, 3418 "%s: aq_get_switch_config() error %s, aq error %s\n", 3419 __func__, i40e_stat_str(hw, status), 3420 i40e_aq_str(hw, hw->aq.asq_last_status)); 3421 sbuf_delete(buf); 3422 return error; 3423 } 3424 if (next) 3425 device_printf(dev, "%s: TODO: get more config with SEID %d\n", 3426 __func__, next); 3427 3428 nmbuf = sbuf_new_auto(); 3429 if (!nmbuf) { 3430 device_printf(dev, "Could not allocate sbuf for name output.\n"); 3431 sbuf_delete(buf); 3432 return (ENOMEM); 3433 } 3434 3435 /* Sort entries by SEID for display */ 3436 qsort(sw_config->element, sw_config->header.num_reported, 3437 sizeof(struct i40e_aqc_switch_config_element_resp), 3438 &ixl_sw_cfg_elem_seid_cmp); 3439 3440 sbuf_cat(buf, "\n"); 3441 /* Assuming <= 255 elements in switch */ 3442 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); 3443 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); 3444 /* Exclude: 3445 * Revision -- all elements are revision 1 for now 3446 */ 3447 
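/*
 * Each element prints as "SEID (name) | uplink (name) | downlink (name)
 * | connection type"; e.g. a VSI attached to a VEB might render as
 * "514 (VSI) | 160 (VEB) | 0 (?) | 2" (values hypothetical, field
 * widths elided).
 */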
sbuf_printf(buf, 3448 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n" 3449 " | | | (uplink)\n"); 3450 for (int i = 0; i < sw_config->header.num_reported; i++) { 3451 elem = &sw_config->element[i]; 3452 3453 // "%4d (%8s) | %8s %8s %#8x", 3454 sbuf_printf(buf, "%4d", elem->seid); 3455 sbuf_cat(buf, " "); 3456 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3457 elem->element_type, elem->seid)); 3458 sbuf_cat(buf, " | "); 3459 sbuf_printf(buf, "%4d", elem->uplink_seid); 3460 sbuf_cat(buf, " "); 3461 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3462 0, elem->uplink_seid)); 3463 sbuf_cat(buf, " | "); 3464 sbuf_printf(buf, "%4d", elem->downlink_seid); 3465 sbuf_cat(buf, " "); 3466 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3467 0, elem->downlink_seid)); 3468 sbuf_cat(buf, " | "); 3469 sbuf_printf(buf, "%8d", elem->connection_type); 3470 if (i < sw_config->header.num_reported - 1) 3471 sbuf_cat(buf, "\n"); 3472 } 3473 sbuf_delete(nmbuf); 3474 3475 error = sbuf_finish(buf); 3476 if (error) 3477 device_printf(dev, "Error finishing sbuf: %d\n", error); 3478 3479 sbuf_delete(buf); 3480 3481 return (error); 3482 } 3483 3484 static int 3485 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) 3486 { 3487 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3488 struct i40e_hw *hw = &pf->hw; 3489 device_t dev = pf->dev; 3490 struct sbuf *buf; 3491 int error = 0; 3492 enum i40e_status_code status; 3493 u32 reg; 3494 3495 struct i40e_aqc_get_set_rss_key_data key_data; 3496 3497 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3498 if (!buf) { 3499 device_printf(dev, "Could not allocate sbuf for output.\n"); 3500 return (ENOMEM); 3501 } 3502 3503 bzero(&key_data, sizeof(key_data)); 3504 3505 sbuf_cat(buf, "\n"); 3506 if (hw->mac.type == I40E_MAC_X722) { 3507 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); 3508 if (status) 3509 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", 3510 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3511 } else { 3512 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 3513 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 3514 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); 3515 } 3516 } 3517 3518 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); 3519 3520 error = sbuf_finish(buf); 3521 if (error) 3522 device_printf(dev, "Error finishing sbuf: %d\n", error); 3523 sbuf_delete(buf); 3524 3525 return (error); 3526 } 3527 3528 static void 3529 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) 3530 { 3531 int i, j, k, width; 3532 char c; 3533 3534 if (length < 1 || buf == NULL) return; 3535 3536 int byte_stride = 16; 3537 int lines = length / byte_stride; 3538 int rem = length % byte_stride; 3539 if (rem > 0) 3540 lines++; 3541 3542 for (i = 0; i < lines; i++) { 3543 width = (rem > 0 && i == lines - 1) 3544 ? 
rem : byte_stride; 3545 3546 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); 3547 3548 for (j = 0; j < width; j++) 3549 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); 3550 3551 if (width < byte_stride) { 3552 for (k = 0; k < (byte_stride - width); k++) 3553 sbuf_printf(sb, " "); 3554 } 3555 3556 if (!text) { 3557 sbuf_printf(sb, "\n"); 3558 continue; 3559 } 3560 3561 for (j = 0; j < width; j++) { 3562 c = (char)buf[i * byte_stride + j]; 3563 if (c < 32 || c > 126) 3564 sbuf_printf(sb, "."); 3565 else 3566 sbuf_printf(sb, "%c", c); 3567 3568 if (j == width - 1) 3569 sbuf_printf(sb, "\n"); 3570 } 3571 } 3572 } 3573 3574 static int 3575 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) 3576 { 3577 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3578 struct i40e_hw *hw = &pf->hw; 3579 device_t dev = pf->dev; 3580 struct sbuf *buf; 3581 int error = 0; 3582 enum i40e_status_code status; 3583 u8 hlut[512]; 3584 u32 reg; 3585 3586 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3587 if (!buf) { 3588 device_printf(dev, "Could not allocate sbuf for output.\n"); 3589 return (ENOMEM); 3590 } 3591 3592 bzero(hlut, sizeof(hlut)); 3593 sbuf_cat(buf, "\n"); 3594 if (hw->mac.type == I40E_MAC_X722) { 3595 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); 3596 if (status) 3597 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", 3598 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3599 } else { 3600 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { 3601 reg = rd32(hw, I40E_PFQF_HLUT(i)); 3602 bcopy(®, &hlut[i << 2], 4); 3603 } 3604 } 3605 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); 3606 3607 error = sbuf_finish(buf); 3608 if (error) 3609 device_printf(dev, "Error finishing sbuf: %d\n", error); 3610 sbuf_delete(buf); 3611 3612 return (error); 3613 } 3614 3615 static int 3616 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) 3617 { 3618 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3619 struct i40e_hw *hw = &pf->hw; 3620 u64 hena; 3621 3622 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 3623 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 3624 3625 return sysctl_handle_long(oidp, NULL, hena, req); 3626 } 3627 3628 /* 3629 * Sysctl to disable firmware's link management 3630 * 3631 * 1 - Disable link management on this port 3632 * 0 - Re-enable link management 3633 * 3634 * On normal NVMs, firmware manages link by default. 
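 *
 * Example (hypothetical unit number):
 *   sysctl dev.ixl.0.debug.disable_fw_link_management=1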
3635 */ 3636 static int 3637 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) 3638 { 3639 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3640 struct i40e_hw *hw = &pf->hw; 3641 device_t dev = pf->dev; 3642 int requested_mode = -1; 3643 enum i40e_status_code status = 0; 3644 int error = 0; 3645 3646 /* Read in new mode */ 3647 error = sysctl_handle_int(oidp, &requested_mode, 0, req); 3648 if ((error) || (req->newptr == NULL)) 3649 return (error); 3650 /* Check for sane value */ 3651 if (requested_mode < 0 || requested_mode > 1) { 3652 device_printf(dev, "Valid modes are 0 or 1\n"); 3653 return (EINVAL); 3654 } 3655 3656 /* Set new mode */ 3657 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); 3658 if (status) { 3659 device_printf(dev, 3660 "%s: Error setting new phy debug mode %s," 3661 " aq error: %s\n", __func__, i40e_stat_str(hw, status), 3662 i40e_aq_str(hw, hw->aq.asq_last_status)); 3663 return (EIO); 3664 } 3665 3666 return (0); 3667 } 3668 3669 /* 3670 * Read some diagnostic data from a (Q)SFP+ module 3671 * 3672 * SFP A2 QSFP Lower Page 3673 * Temperature 96-97 22-23 3674 * Vcc 98-99 26-27 3675 * TX power 102-103 34-35..40-41 3676 * RX power 104-105 50-51..56-57 3677 */ 3678 static int 3679 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 3680 { 3681 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3682 device_t dev = pf->dev; 3683 struct sbuf *sbuf; 3684 int error = 0; 3685 u8 output; 3686 3687 if (req->oldptr == NULL) { 3688 error = SYSCTL_OUT(req, 0, 128); 3689 return (0); 3690 } 3691 3692 error = pf->read_i2c_byte(pf, 0, 0xA0, &output); 3693 if (error) { 3694 device_printf(dev, "Error reading from i2c\n"); 3695 return (error); 3696 } 3697 3698 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 3699 if (output == 0x3) { 3700 /* 3701 * Check for: 3702 * - Internally calibrated data 3703 * - Diagnostic monitoring is implemented 3704 */ 3705 pf->read_i2c_byte(pf, 92, 0xA0, &output); 3706 if (!(output & 0x60)) { 3707 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); 3708 return (0); 3709 } 3710 3711 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3712 3713 for (u8 offset = 96; offset < 100; offset++) { 3714 pf->read_i2c_byte(pf, offset, 0xA2, &output); 3715 sbuf_printf(sbuf, "%02X ", output); 3716 } 3717 for (u8 offset = 102; offset < 106; offset++) { 3718 pf->read_i2c_byte(pf, offset, 0xA2, &output); 3719 sbuf_printf(sbuf, "%02X ", output); 3720 } 3721 } else if (output == 0xD || output == 0x11) { 3722 /* 3723 * QSFP+ modules are always internally calibrated, and must indicate 3724 * what types of diagnostic monitoring are implemented 3725 */ 3726 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3727 3728 for (u8 offset = 22; offset < 24; offset++) { 3729 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3730 sbuf_printf(sbuf, "%02X ", output); 3731 } 3732 for (u8 offset = 26; offset < 28; offset++) { 3733 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3734 sbuf_printf(sbuf, "%02X ", output); 3735 } 3736 /* Read the data from the first lane */ 3737 for (u8 offset = 34; offset < 36; offset++) { 3738 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3739 sbuf_printf(sbuf, "%02X ", output); 3740 } 3741 for (u8 offset = 50; offset < 52; offset++) { 3742 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3743 sbuf_printf(sbuf, "%02X ", output); 3744 } 3745 } else { 3746 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); 3747 return (0); 3748 } 3749 3750 sbuf_finish(sbuf); 3751 sbuf_delete(sbuf); 3752 3753 return (0); 3754 } 3755 3756 /* 3757 * 
/*
 * Sysctl to read a byte from the I2C bus.
 *
 * Input: 32-bit value:
 *	bits 0-7:   device address (0xA0 or 0xA2)
 *	bits 8-15:  offset (0-255)
 *	bits 16-31: unused
 * Output: 8-bit value read
 */
static int
ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int input = -1, error = 0;
	u8 dev_addr, offset, output;

	/* Read in I2C read parameters */
	error = sysctl_handle_int(oidp, &input, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Validate device address */
	dev_addr = input & 0xFF;
	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
		return (EINVAL);
	}
	offset = (input >> 8) & 0xFF;

	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
	if (error)
		return (error);

	device_printf(dev, "%02X\n", output);
	return (0);
}

/*
 * Sysctl to write a byte to the I2C bus.
 *
 * Input: 32-bit value:
 *	bits 0-7:   device address (0xA0 or 0xA2)
 *	bits 8-15:  offset (0-255)
 *	bits 16-23: value to write
 *	bits 24-31: unused
 * Output: 8-bit value written
 */
static int
ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int input = -1, error = 0;
	u8 dev_addr, offset, value;

	/* Read in I2C write parameters */
	error = sysctl_handle_int(oidp, &input, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Validate device address */
	dev_addr = input & 0xFF;
	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
		return (EINVAL);
	}
	offset = (input >> 8) & 0xFF;
	value = (input >> 16) & 0xFF;

	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
	if (error)
		return (error);

	device_printf(dev, "%02X written\n", value);
	return (0);
}
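
/*
 * Illustrative usage of the two handlers above; the sysctl node names are
 * assumptions (they may live under a debug subtree). To read the byte at
 * offset 0x60 of device address 0xA2, encode (offset << 8) | dev_addr:
 *
 *	(0x60 << 8) | 0xA2 = 0x60A2
 *	# sysctl dev.ixl.0.debug.read_i2c_byte=0x60A2
 *
 * To write 0x55 to offset 0x7B of device address 0xA0, also fold the value
 * into bits 16-23:
 *
 *	(0x55 << 16) | (0x7B << 8) | 0xA0 = 0x557BA0
 *	# sysctl dev.ixl.0.debug.write_i2c_byte=0x557BA0
 */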
static int
ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int *is_set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return (EIO);

	status = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, abilities, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}

	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
	return (0);
}

static int
ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code status;

	/* Set new PHY config */
	memset(&config, 0, sizeof(config));
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
	if (set)
		config.fec_config |= bit_pos;
	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		config.phy_type = abilities->phy_type;
		config.phy_type_ext = abilities->phy_type_ext;
		config.link_speed = abilities->link_speed;
		config.eee_capability = abilities->eee_capability;
		config.eeer = abilities->eeer_val;
		config.low_power_ctrl = abilities->d3_lpan;
		status = i40e_aq_set_phy_config(hw, &config, NULL);

		if (status) {
			device_printf(dev,
			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
			    __func__, i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			return (EIO);
		}
	}

	return (0);
}

static int
ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return (ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)));
}

static int
ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return (ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)));
}

static int
ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return (ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)));
}

static int
ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return (ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)));
}

static int
ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return (ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)));
}
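
/*
 * Illustrative usage of the five FEC sysctls above; the node names follow
 * the usual dev.ixl.<unit>.fec convention but should be treated as
 * assumptions:
 *
 *	# sysctl dev.ixl.0.fec.rs_ability=1	# advertise CL108 RS-FEC
 *	# sysctl dev.ixl.0.fec.auto_fec_enabled=1	# let FW pick the FEC mode
 *
 * Each handler uses the same read-modify-write pattern: fetch the current
 * fec_cfg_curr_mod_ext_info byte via ixl_get_fec_config(), flip exactly one
 * ability/request bit, and have ixl_set_fec_config() push the full PHY
 * config back (atomically, and only if the byte actually changed) so the
 * remaining FEC bits are preserved.
 */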
static int
ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	u8 *final_buff;
	/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT);
	if (final_buff == NULL) {
		device_printf(dev, "Could not allocate memory for output.\n");
		goto out;
	}
	int final_buff_len = 0;

	u8 cluster_id = 1;
	bool more = true;

	u8 dump_buf[4096];
	u16 curr_buff_size = 4096;
	u8 curr_next_table = 0;
	u32 curr_next_index = 0;

	u16 ret_buff_size;
	u8 ret_next_table;
	u32 ret_next_index;

	sbuf_cat(buf, "\n");

	while (more) {
		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table,
		    curr_next_index, curr_buff_size,
		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
		if (status) {
			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			goto free_out;
		}

		/* copy info out of temp buffer */
		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
		final_buff_len += ret_buff_size;

		if (ret_next_table != curr_next_table) {
			/* We're done with the current table; we can dump out read data. */
			sbuf_printf(buf, "%d:", curr_next_table);
			int bytes_printed = 0;
			while (bytes_printed < final_buff_len) {
				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
				bytes_printed += 16;
			}
			sbuf_cat(buf, "\n");

			/* The entire cluster has been read; we're finished */
			if (ret_next_table == 0xFF)
				break;

			/* Otherwise clear the output buffer and continue reading */
			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
			final_buff_len = 0;
		}

		if (ret_next_index == 0xFFFFFFFF)
			ret_next_index = 0;

		bzero(dump_buf, sizeof(dump_buf));
		curr_next_table = ret_next_table;
		curr_next_index = ret_next_index;
	}

free_out:
	free(final_buff, M_DEVBUF);
out:
	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
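
/*
 * Note on the "%16D" conversion used above: the kernel printf family that
 * backs sbuf_printf() supports the non-standard %D format (see printf(9)),
 * which hex-dumps a run of bytes from a pointer using the string argument
 * as a separator. A minimal standalone sketch:
 */
#if 0
static void
hexdump_example(void)
{
	u8 mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	printf("%6D\n", mac, ":");	/* prints "00:1b:21:aa:bb:cc" */
	printf("%6D\n", mac, "");	/* prints "001b21aabbcc" */
}
#endif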
Set the \"LLDP Agent\" UEFI HII " 4089 "attribute to \"Enabled\" to use this sysctl\n"); 4090 return (EINVAL); 4091 default: 4092 device_printf(pf->dev, 4093 "Starting FW LLDP agent failed: error: %s, %s\n", 4094 i40e_stat_str(hw, status), 4095 i40e_aq_str(hw, hw->aq.asq_last_status)); 4096 return (EINVAL); 4097 } 4098 } 4099 4100 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4101 return (0); 4102 } 4103 4104 static int 4105 ixl_stop_fw_lldp(struct ixl_pf *pf) 4106 { 4107 struct i40e_hw *hw = &pf->hw; 4108 device_t dev = pf->dev; 4109 enum i40e_status_code status; 4110 4111 if (hw->func_caps.npar_enable != 0) { 4112 device_printf(dev, 4113 "Disabling FW LLDP agent is not supported on this device\n"); 4114 return (EINVAL); 4115 } 4116 4117 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 4118 device_printf(dev, 4119 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); 4120 return (EINVAL); 4121 } 4122 4123 status = i40e_aq_stop_lldp(hw, true, false, NULL); 4124 if (status != I40E_SUCCESS) { 4125 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 4126 device_printf(dev, 4127 "Disabling FW LLDP agent failed: error: %s, %s\n", 4128 i40e_stat_str(hw, status), 4129 i40e_aq_str(hw, hw->aq.asq_last_status)); 4130 return (EINVAL); 4131 } 4132 4133 device_printf(dev, "FW LLDP agent is already stopped\n"); 4134 } 4135 4136 #ifndef EXTERNAL_RELEASE 4137 /* Let the FW set default DCB configuration on link UP as described in DCR 307.1 */ 4138 #endif 4139 i40e_aq_set_dcb_parameters(hw, true, NULL); 4140 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4141 return (0); 4142 } 4143 4144 static int 4145 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 4146 { 4147 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4148 int state, new_state, error = 0; 4149 4150 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); 4151 4152 /* Read in new mode */ 4153 error = sysctl_handle_int(oidp, &new_state, 0, req); 4154 if ((error) || (req->newptr == NULL)) 4155 return (error); 4156 4157 /* Already in requested state */ 4158 if (new_state == state) 4159 return (error); 4160 4161 if (new_state == 0) 4162 return ixl_stop_fw_lldp(pf); 4163 4164 return ixl_start_fw_lldp(pf); 4165 } 4166 4167 static int 4168 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4169 { 4170 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4171 int state, new_state; 4172 int sysctl_handle_status = 0; 4173 enum i40e_status_code cmd_status; 4174 4175 /* Init states' values */ 4176 state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED)); 4177 4178 /* Get requested mode */ 4179 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); 4180 if ((sysctl_handle_status) || (req->newptr == NULL)) 4181 return (sysctl_handle_status); 4182 4183 /* Check if state has changed */ 4184 if (new_state == state) 4185 return (0); 4186 4187 /* Set new state */ 4188 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); 4189 4190 /* Save new state or report error */ 4191 if (!cmd_status) { 4192 if (new_state == 0) 4193 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4194 else 4195 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4196 } else if (cmd_status == I40E_ERR_CONFIG) 4197 return (EPERM); 4198 else 4199 return (EIO); 4200 4201 return (0); 4202 } 4203 4204 int 4205 ixl_attach_get_link_status(struct ixl_pf *pf) 4206 { 4207 struct i40e_hw *hw = &pf->hw; 4208 device_t dev = pf->dev; 4209 int error = 0; 4210 4211 if (((hw->aq.fw_maj_ver == 4) && 
static int
ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int state, new_state;
	int sysctl_handle_status = 0;
	enum i40e_status_code cmd_status;

	/* Initialize current and new state values */
	state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));

	/* Get requested mode */
	sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
	if ((sysctl_handle_status) || (req->newptr == NULL))
		return (sysctl_handle_status);

	/* Check if state has changed */
	if (new_state == state)
		return (0);

	/* Set new state */
	cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));

	/* Save new state or report error */
	if (!cmd_status) {
		if (new_state == 0)
			atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
		else
			atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
	} else if (cmd_status == I40E_ERR_CONFIG)
		return (EPERM);
	else
		return (EIO);

	return (0);
}

int
ixl_attach_get_link_status(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	int error = 0;

	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (error) {
			device_printf(dev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
			return (error);
		}
	}

	/* Determine link state */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	return (0);
}

static int
ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int requested = 0, error = 0;

	/* Read in the request; any written value triggers the reset */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Initiate the PF reset later in the admin task */
	atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);

	return (error);
}

static int
ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int requested = 0, error = 0;

	/* Read in the request; any written value triggers the reset */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);

	return (error);
}

static int
ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int requested = 0, error = 0;

	/* Read in the request; any written value triggers the reset */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);

	return (error);
}

/*
 * Print out the mapping of Tx and Rx queue indexes
 * to MSI-X vectors.
 */
static int
ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	struct ixl_rx_queue *rx_que;
	struct ixl_tx_queue *tx_que;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		rx_que = &vsi->rx_queues[i];
		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
	}
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		tx_que = &vsi->tx_queues[i];
		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
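
/*
 * Sample output from the handler above (illustrative only; actual vector
 * assignments depend on how many queues and MSI-X vectors were granted,
 * with vector 0 typically reserved for the admin queue):
 *
 *	(rxq   0): 1
 *	(rxq   1): 2
 *	(txq   0): 1
 *	(txq   1): 2
 */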