/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);

/* Sysctls */
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

/* Const-qualified to match ixl_fc_string; these are never written. */
static const char * const ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}
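/*
 * For illustration only: with hypothetical version values (derived purely
 * from the format string above, not from any real device), the resulting
 * string would look like:
 *
 *	fw 6.0.48442 api 1.7 nvm 6.01 etid 80003554 oem 1.38.0
 */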
void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify state of FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else {
		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
			fw_mode = IXL_FW_MODE_UEMPR;
	}
	return (fw_mode);
}

/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success, or an error code on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/* Check FW mode again in case it has changed while
	 * waiting for reset to complete */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}
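/*
 * Note on the contract above: a zero return does not guarantee a working
 * device. When FW is in recovery mode this function returns 0 without
 * performing a reset, so callers must also check IXL_PF_STATE_RECOVERY_MODE
 * in pf->state before relying on full functionality.
 */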
/**
 * ixl_setup_hmc - Setup LAN Host Memory Cache
 * @pf: PF structure
 *
 * Init and configure LAN Host Memory Cache
 *
 * @returns 0 on success, EIO on error
 */
int
ixl_setup_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	return (0);
}

/**
 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
 * @pf: PF structure
 *
 * Shutdown Host Memory Cache if configured.
 */
void
ixl_shutdown_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/* HMC not configured, no need to shutdown */
	if (hw->hmc.hmc_obj == NULL)
		return;

	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(pf->dev,
		    "Shutdown LAN HMC failed with code %s\n",
		    i40e_stat_str(hw, status));
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		hw->func_caps.iwarp = 0;
		return (0);
	}

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_DEVBUF);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case IXL_I2C_ACCESS_METHOD_AQ:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}
	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
		    __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

static u_int
ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_vsi *vsi = arg;

	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));

	return (1);
}
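/*
 * Note: if_foreach_llmaddr() walks the interface's link-level multicast
 * addresses, invoking the callback for each one, and returns the sum of
 * the callback return values. Since ixl_add_maddr() returns 1 per entry,
 * that sum is the number of multicast filters added (see ixl_add_multi()
 * below).
 */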
/*********************************************************************
 *	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;
	int mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	/*
	** First just get a count, to decide if we
	** simply use multicast promiscuous.
	*/
	mcnt = if_llmaddr_count(ifp);
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		/* delete existing MC filters */
		ixl_del_hw_filters(vsi, mcnt);
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}

	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, vsi);
	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

static u_int
ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_mac_filter *f = arg;

	if (cmp_etheraddr(f->macaddr, (u8 *)LLADDR(sdl)))
		return (1);
	else
		return (0);
}

int
ixl_del_multi(struct ixl_vsi *vsi)
{
	struct ifnet *ifp = vsi->ifp;
	struct ixl_mac_filter *f;
	int mcnt = 0;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	/* Search for removed multicast addresses */
	SLIST_FOREACH(f, &vsi->ftl, next)
		if ((f->flags & IXL_FILTER_USED) &&
		    (f->flags & IXL_FILTER_MC) &&
		    (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}

	if (mcnt > 0)
		ixl_del_hw_filters(vsi, mcnt);

	return (mcnt);
}

void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = pf->vsi.ifp;
	const char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname,
	    ixl_link_speed_string(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	    hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
	    ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);	   /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

void
ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}
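/*
 * Usage sketch (hypothetical caller, not part of this file): an attach
 * path would hand the FW-reported PHY-type bitmap to the helper above and
 * then pick autoselect as the default media:
 *
 *	ixl_add_ifmedia(vsi->media, hw->phy.phy_types);
 *	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
 *	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
 */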
/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	device_t	dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8	aq_buf[I40E_AQ_LARGE_BUF];
	int	ret;
	u16	next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}

void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
	struct ixl_mac_filter *f;

	while (!SLIST_EMPTY(&vsi->ftl)) {
		f = SLIST_FIRST(&vsi->ftl);
		SLIST_REMOVE_HEAD(&vsi->ftl, next);
		free(f, M_DEVBUF);
	}

	vsi->num_hw_filters = 0;
}

void
ixl_vsi_add_sysctls(struct ixl_vsi *vsi, const char *sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}
/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}
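/*
 * Both handlers are attached elsewhere in the driver as per-device
 * sysctls. As a sketch (node names assumed from the handler names), an
 * administrator would set a static ITR from userland with e.g.:
 *
 *	sysctl dev.ixl.0.rx_itr=62
 */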
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
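		/*
		 * Note (assumed from the AQ structure layout): the 52 bytes
		 * copied below are IXL_RSS_KEY_SIZE_REG (13) u32 words, which
		 * is exactly sizeof(struct i40e_aqc_get_set_rss_key_data):
		 * a 40-byte standard key plus the 12-byte extended hash key
		 * the X722 uses.
		 */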
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}

/*
 * Configure enabled PCTYPES for RSS.
 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64 set_hena = 0, hena;

#ifdef RSS
	u32 rss_hash_config;

	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}

/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}

/*
 * In some firmware versions a default MAC/VLAN filter is configured
 * which interferes with the filters managed by the driver.
 * Make sure it's removed.
 */
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
	struct i40e_aqc_remove_macvlan_element_data e;

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
	    I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	if (IXL_VSI_IS_VF(vsi))
		return;

	ixl_del_default_hw_filters(vsi);

	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}

/*
** This routine adds multicast filters
*/
void
ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
{
	struct ixl_mac_filter *f;

	/* Does one already exist */
	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
	if (f != NULL)
		return;

	f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY);
	if (f != NULL)
		f->flags |= IXL_FILTER_MC;
	else
		printf("WARNING: no filter available!!\n");
}

void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
}

/*
 * This routine adds a MAC/VLAN filter to the software filter
 * list, then adds that new filter to the HW if it doesn't already
 * exist in the SW filter list.
 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f, *tmp;
	struct ixl_pf		*pf;
	device_t		dev;

	pf = vsi->back;
	dev = pf->dev;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist */
	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f != NULL)
		return;
	/*
	** Is this the first vlan being registered, if so we
	** need to remove the ANY filter that indicates we are
	** not in a vlan, and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
			ixl_add_filter(vsi, macaddr, 0);
		}
	}

	f = ixl_new_filter(vsi, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	f->flags |= IXL_FILTER_USED;
	ixl_add_hw_filters(vsi, f->flags, 1);
}
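/*
 * Example of the transition handled above (sketch): with only
 * (mac, IXL_VLAN_ANY) programmed, registering the first VLAN, say 100,
 * leaves the list holding (mac, 0) and (mac, 100), so untagged traffic
 * is still matched by an exact VLAN-0 filter instead of the ANY wildcard.
 */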
void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	f = ixl_find_filter(vsi, macaddr, vlan);
	if (f == NULL)
		return;

	/*
	 * Update the counter before calling ixl_del_hw_filters(); that
	 * call removes the flagged entry from the list and frees it, so
	 * touching 'f' afterwards would be a use after free.
	 */
	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
		vsi->num_macs--;

	f->flags |= IXL_FILTER_DEL;
	ixl_del_hw_filters(vsi, 1);

	/* Check if this is the last vlan removal */
	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
		/* Switch back to a non-vlan filter */
		ixl_del_filter(vsi, macaddr, 0);
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
	}
	return;
}

/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((cmp_etheraddr(f->macaddr, macaddr) != 0)
		    && (f->vlan == vlan)) {
			return (f);
		}
	}

	return (NULL);
}

/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter	*f;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	enum i40e_status_code	status;
	int			j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf,
	    "ixl_add_hw_filters: flags: %d cnt: %d\n", flags, cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	/*
	** Scan the filter list, each time we find one
	** we add it to the admin queue array and turn off
	** the add bit.
	*/
	SLIST_FOREACH(f, &vsi->ftl, next) {
		if ((f->flags & flags) == flags) {
			b = &a[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
			if (f->vlan == IXL_VLAN_ANY) {
				b->vlan_tag = 0;
				b->flags = CPU_TO_LE16(
				    I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
			} else {
				b->vlan_tag = CPU_TO_LE16(f->vlan);
				b->flags = 0;
			}
			b->flags |= CPU_TO_LE16(
			    I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
			f->flags &= ~IXL_FILTER_ADD;
			j++;

			ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
			    MAC_FORMAT_ARGS(f->macaddr));
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
		if (status)
			device_printf(dev, "i40e_aq_add_macvlan status %s, "
			    "error %s\n", i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
		else
			vsi->num_hw_filters += j;
	}
	free(a, M_DEVBUF);
	return;
}
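/*
 * Note: vsi->num_hw_filters above counts only filters the firmware
 * accepted; on an AQ failure the matching list entries have already had
 * IXL_FILTER_ADD cleared, so they are not retried automatically by a
 * later call to this routine.
 */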
/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf		*pf;
	struct i40e_hw		*hw;
	device_t		dev;
	struct ixl_mac_filter	*f, *f_temp;
	enum i40e_status_code	status;
	int			j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			e = &d[j]; // a pox on fvl long names :)
			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			if (f->vlan == IXL_VLAN_ANY) {
				e->vlan_tag = 0;
				e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				e->vlan_tag = f->vlan;
			}

			ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
			    MAC_FORMAT_ARGS(f->macaddr));

			/* delete entry from vsi list */
			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
			free(f, M_DEVBUF);
			j++;
		}
		if (j == cnt)
			break;
	}
	if (j > 0) {
		status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
		if (status) {
			int sc = 0;
			for (int i = 0; i < j; i++)
				sc += (!d[i].error_code);
			vsi->num_hw_filters -= sc;
			device_printf(dev,
			    "Failed to remove %d/%d filters, error %s\n",
			    j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
		} else
			vsi->num_hw_filters -= j;
	}
	free(d, M_DEVBUF);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
	return;
}

int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg |= I40E_QRX_ENA_QENA_REQ_MASK |
	    I40E_QRX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "RX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
	i40e_usec_delay(500);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "TX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

/*
 * Returns error on first ring that is detected hung.
 */
int
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw	*hw = &pf->hw;
	int		error = 0;
	u32		reg;
	u16		pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
	/* Verify the disable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;
		i40e_msec_delay(10);
	}
	if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
		device_printf(pf->dev, "RX queue %d still enabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

int
ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	int error = 0;

	error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
	/* Called function already prints error message */
	if (error)
		return (error);
	error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
	return (error);
}
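/*
 * Pairing sketch (hypothetical caller, VSI-relative indices; 'num_queues'
 * stands for the VSI's queue count): rings are typically disabled before
 * their queue contexts are rewritten and re-enabled afterwards, e.g.:
 *
 *	for (u16 i = 0; i < num_queues; i++)
 *		ixl_disable_ring(pf, &pf->qtag, i);
 */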
static void
ixl_handle_tx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 vf_num, queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	reg = rd32(hw, I40E_PF_MDET_TX);
	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
		    event, queue, pf_num, vf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "TX Malicious Driver Detection event (unknown)\n");
}

static void
ixl_handle_rx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/*
	 * GL_MDET_RX doesn't contain VF number information, unlike
	 * GL_MDET_TX.
	 */
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
		    I40E_GL_MDET_RX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
		    I40E_GL_MDET_RX_QUEUE_SHIFT;
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	reg = rd32(hw, I40E_PF_MDET_RX);
	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d, (VF-%d)\n",
		    event, queue, pf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "RX Malicious Driver Detection event (unknown)\n");
}

/**
 * ixl_handle_mdd_event
 *
 * Called from interrupt handler to identify possibly malicious vfs
 * (But also detects events from the PF, as well)
 **/
void
ixl_handle_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/*
	 * Handle both TX/RX because it's possible they could
	 * both trigger in the same interrupt.
	 */
	ixl_handle_tx_mdd_event(pf);
	ixl_handle_rx_mdd_event(pf);

	atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);

	/* re-enable mdd interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	ixl_flush(hw);
}

void
ixl_enable_intr0(struct i40e_hw *hw)
{
	u32 reg;

	/* Use IXL_ITR_NONE so ITR isn't updated here */
	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
}

void
ixl_disable_intr0(struct i40e_hw *hw)
{
	u32 reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
	ixl_flush(hw);
}

void
ixl_enable_queue(struct i40e_hw *hw, int id)
{
	u32 reg;

	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}

void
ixl_disable_queue(struct i40e_hw *hw, int id)
{
	u32 reg;

	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
}
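/*
 * Usage note (sketch, mapping assumed): 'id' in the two helpers above is
 * the zero-based queue-interrupt index, not the MSI-X vector number.
 * Since vector 0 is reserved for the admin queue (see
 * ixl_configure_intr0_msix()), a caller holding an MSI-X vector for a
 * queue would pass vector - 1 here.
 */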
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);

	ixl_prepare_for_reset(pf, is_up);
	/*
	 * i40e_pf_reset checks the type of reset and acts
	 * accordingly. If EMP or Core reset was performed
	 * doing PF reset is not necessary and it sometimes
	 * fails.
	 */
	ixl_pf_reset(pf);

	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
		atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
		device_printf(pf->dev,
		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		pf->link_up = FALSE;
		ixl_update_link_status(pf);
	}

	ixl_rebuild_hw_structs_after_reset(pf, is_up);

	atomic_clear_32(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
}

void
ixl_update_stats_counters(struct ixl_pf *pf)
{
	struct i40e_hw	*hw = &pf->hw;
	struct ixl_vsi	*vsi = &pf->vsi;
	struct ixl_vf	*vf;
	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;

	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
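	/*
	 * Note on the ixl_stat_update32/48 helpers used below (defined
	 * elsewhere in the driver): on the first pass, while
	 * pf->stat_offsets_loaded is still false, they latch a baseline
	 * value; on later passes they report the delta from that baseline,
	 * which is assumed to also account for hardware counter rollover.
	 */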
pf->stat_offsets_loaded, 1813 &osd->link_xoff_tx, &nsd->link_xoff_tx); 1814 1815 /* 1816 * For watchdog management we need to know if we have been paused 1817 * during the last interval, so capture that here. 1818 */ 1819 if (pf->stats.link_xoff_rx != prev_link_xoff_rx) 1820 vsi->shared->isc_pause_frames = 1; 1821 1822 /* Packet size stats rx */ 1823 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 1824 I40E_GLPRT_PRC64L(hw->port), 1825 pf->stat_offsets_loaded, 1826 &osd->rx_size_64, &nsd->rx_size_64); 1827 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), 1828 I40E_GLPRT_PRC127L(hw->port), 1829 pf->stat_offsets_loaded, 1830 &osd->rx_size_127, &nsd->rx_size_127); 1831 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), 1832 I40E_GLPRT_PRC255L(hw->port), 1833 pf->stat_offsets_loaded, 1834 &osd->rx_size_255, &nsd->rx_size_255); 1835 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), 1836 I40E_GLPRT_PRC511L(hw->port), 1837 pf->stat_offsets_loaded, 1838 &osd->rx_size_511, &nsd->rx_size_511); 1839 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), 1840 I40E_GLPRT_PRC1023L(hw->port), 1841 pf->stat_offsets_loaded, 1842 &osd->rx_size_1023, &nsd->rx_size_1023); 1843 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), 1844 I40E_GLPRT_PRC1522L(hw->port), 1845 pf->stat_offsets_loaded, 1846 &osd->rx_size_1522, &nsd->rx_size_1522); 1847 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), 1848 I40E_GLPRT_PRC9522L(hw->port), 1849 pf->stat_offsets_loaded, 1850 &osd->rx_size_big, &nsd->rx_size_big); 1851 1852 /* Packet size stats tx */ 1853 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), 1854 I40E_GLPRT_PTC64L(hw->port), 1855 pf->stat_offsets_loaded, 1856 &osd->tx_size_64, &nsd->tx_size_64); 1857 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), 1858 I40E_GLPRT_PTC127L(hw->port), 1859 pf->stat_offsets_loaded, 1860 &osd->tx_size_127, &nsd->tx_size_127); 1861 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), 1862 I40E_GLPRT_PTC255L(hw->port), 1863 pf->stat_offsets_loaded, 1864 &osd->tx_size_255, &nsd->tx_size_255); 1865 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), 1866 I40E_GLPRT_PTC511L(hw->port), 1867 pf->stat_offsets_loaded, 1868 &osd->tx_size_511, &nsd->tx_size_511); 1869 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), 1870 I40E_GLPRT_PTC1023L(hw->port), 1871 pf->stat_offsets_loaded, 1872 &osd->tx_size_1023, &nsd->tx_size_1023); 1873 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), 1874 I40E_GLPRT_PTC1522L(hw->port), 1875 pf->stat_offsets_loaded, 1876 &osd->tx_size_1522, &nsd->tx_size_1522); 1877 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), 1878 I40E_GLPRT_PTC9522L(hw->port), 1879 pf->stat_offsets_loaded, 1880 &osd->tx_size_big, &nsd->tx_size_big); 1881 1882 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), 1883 pf->stat_offsets_loaded, 1884 &osd->rx_undersize, &nsd->rx_undersize); 1885 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), 1886 pf->stat_offsets_loaded, 1887 &osd->rx_fragments, &nsd->rx_fragments); 1888 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), 1889 pf->stat_offsets_loaded, 1890 &osd->rx_oversize, &nsd->rx_oversize); 1891 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 1892 pf->stat_offsets_loaded, 1893 &osd->rx_jabber, &nsd->rx_jabber); 1894 pf->stat_offsets_loaded = true; 1895 /* End hw stats */ 1896 1897 /* Update vsi stats */ 1898 ixl_update_vsi_stats(vsi); 1899 1900 for (int i = 0; i < pf->num_vfs; i++) { 1901 vf = &pf->vfs[i]; 1902 if (vf->vf_flags & VF_FLAG_ENABLED) 1903 ixl_update_eth_stats(&pf->vfs[i].vsi); 1904 } 1905 } 1906 1907 /** 1908 * 
Update VSI-specific ethernet statistics counters. 1909 **/ 1910 void 1911 ixl_update_eth_stats(struct ixl_vsi *vsi) 1912 { 1913 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 1914 struct i40e_hw *hw = &pf->hw; 1915 struct i40e_eth_stats *es; 1916 struct i40e_eth_stats *oes; 1917 u16 stat_idx = vsi->info.stat_counter_idx; 1918 1919 es = &vsi->eth_stats; 1920 oes = &vsi->eth_stats_offsets; 1921 1922 /* Gather up the stats that the hw collects */ 1923 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), 1924 vsi->stat_offsets_loaded, 1925 &oes->tx_errors, &es->tx_errors); 1926 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 1927 vsi->stat_offsets_loaded, 1928 &oes->rx_discards, &es->rx_discards); 1929 1930 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), 1931 I40E_GLV_GORCL(stat_idx), 1932 vsi->stat_offsets_loaded, 1933 &oes->rx_bytes, &es->rx_bytes); 1934 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), 1935 I40E_GLV_UPRCL(stat_idx), 1936 vsi->stat_offsets_loaded, 1937 &oes->rx_unicast, &es->rx_unicast); 1938 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), 1939 I40E_GLV_MPRCL(stat_idx), 1940 vsi->stat_offsets_loaded, 1941 &oes->rx_multicast, &es->rx_multicast); 1942 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), 1943 I40E_GLV_BPRCL(stat_idx), 1944 vsi->stat_offsets_loaded, 1945 &oes->rx_broadcast, &es->rx_broadcast); 1946 1947 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), 1948 I40E_GLV_GOTCL(stat_idx), 1949 vsi->stat_offsets_loaded, 1950 &oes->tx_bytes, &es->tx_bytes); 1951 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), 1952 I40E_GLV_UPTCL(stat_idx), 1953 vsi->stat_offsets_loaded, 1954 &oes->tx_unicast, &es->tx_unicast); 1955 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), 1956 I40E_GLV_MPTCL(stat_idx), 1957 vsi->stat_offsets_loaded, 1958 &oes->tx_multicast, &es->tx_multicast); 1959 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), 1960 I40E_GLV_BPTCL(stat_idx), 1961 vsi->stat_offsets_loaded, 1962 &oes->tx_broadcast, &es->tx_broadcast); 1963 vsi->stat_offsets_loaded = true; 1964 } 1965 1966 void 1967 ixl_update_vsi_stats(struct ixl_vsi *vsi) 1968 { 1969 struct ixl_pf *pf; 1970 struct ifnet *ifp; 1971 struct i40e_eth_stats *es; 1972 u64 tx_discards; 1973 1974 struct i40e_hw_port_stats *nsd; 1975 1976 pf = vsi->back; 1977 ifp = vsi->ifp; 1978 es = &vsi->eth_stats; 1979 nsd = &pf->stats; 1980 1981 ixl_update_eth_stats(vsi); 1982 1983 tx_discards = es->tx_discards + nsd->tx_dropped_link_down; 1984 1985 /* Update ifnet stats */ 1986 IXL_SET_IPACKETS(vsi, es->rx_unicast + 1987 es->rx_multicast + 1988 es->rx_broadcast); 1989 IXL_SET_OPACKETS(vsi, es->tx_unicast + 1990 es->tx_multicast + 1991 es->tx_broadcast); 1992 IXL_SET_IBYTES(vsi, es->rx_bytes); 1993 IXL_SET_OBYTES(vsi, es->tx_bytes); 1994 IXL_SET_IMCASTS(vsi, es->rx_multicast); 1995 IXL_SET_OMCASTS(vsi, es->tx_multicast); 1996 1997 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + 1998 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments + 1999 nsd->rx_jabber); 2000 IXL_SET_OERRORS(vsi, es->tx_errors); 2001 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); 2002 IXL_SET_OQDROPS(vsi, tx_discards); 2003 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); 2004 IXL_SET_COLLISIONS(vsi, 0); 2005 } 2006 2007 /** 2008 * Reset all of the stats for the given pf 2009 **/ 2010 void 2011 ixl_pf_reset_stats(struct ixl_pf *pf) 2012 { 2013 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); 2014 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); 2015 pf->stat_offsets_loaded = false; 2016 } 2017 2018 /** 2019 * Resets all stats of 
the given vsi 2020 **/ 2021 void 2022 ixl_vsi_reset_stats(struct ixl_vsi *vsi) 2023 { 2024 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); 2025 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); 2026 vsi->stat_offsets_loaded = false; 2027 } 2028 2029 /** 2030 * Read and update a 48 bit stat from the hw 2031 * 2032 * Since the device stats are not reset at PFReset, they likely will not 2033 * be zeroed when the driver starts. We'll save the first values read 2034 * and use them as offsets to be subtracted from the raw values in order 2035 * to report stats that count from zero. 2036 **/ 2037 void 2038 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, 2039 bool offset_loaded, u64 *offset, u64 *stat) 2040 { 2041 u64 new_data; 2042 2043 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) 2044 new_data = rd64(hw, loreg); 2045 #else 2046 /* 2047 * Use two rd32's instead of one rd64; FreeBSD versions before 2048 * 10 don't support 64-bit bus reads/writes. 2049 */ 2050 new_data = rd32(hw, loreg); 2051 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; 2052 #endif 2053 2054 if (!offset_loaded) 2055 *offset = new_data; 2056 if (new_data >= *offset) 2057 *stat = new_data - *offset; 2058 else 2059 *stat = (new_data + ((u64)1 << 48)) - *offset; 2060 *stat &= 0xFFFFFFFFFFFFULL; 2061 } 2062 2063 /** 2064 * Read and update a 32 bit stat from the hw 2065 **/ 2066 void 2067 ixl_stat_update32(struct i40e_hw *hw, u32 reg, 2068 bool offset_loaded, u64 *offset, u64 *stat) 2069 { 2070 u32 new_data; 2071 2072 new_data = rd32(hw, reg); 2073 if (!offset_loaded) 2074 *offset = new_data; 2075 if (new_data >= *offset) 2076 *stat = (u32)(new_data - *offset); 2077 else 2078 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); 2079 } 2080 2081 /** 2082 * Add subset of device sysctls safe to use in recovery mode 2083 */ 2084 void 2085 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf) 2086 { 2087 device_t dev = pf->dev; 2088 2089 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2090 struct sysctl_oid_list *ctx_list = 2091 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2092 2093 struct sysctl_oid *debug_node; 2094 struct sysctl_oid_list *debug_list; 2095 2096 SYSCTL_ADD_PROC(ctx, ctx_list, 2097 OID_AUTO, "fw_version", 2098 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2099 ixl_sysctl_show_fw, "A", "Firmware version"); 2100 2101 /* Add sysctls meant to print debug information, but don't list them 2102 * in "sysctl -a" output. 
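 * (Note: CTLFLAG_SKIP only hides the "debug" node from the default tree * walk; each entry can still be read by its full name -- e.g., on a * hypothetical first adapter, sysctl dev.ixl.0.debug.dump_debug_data.)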
*/ 2103 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2104 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2105 "Debug Sysctls"); 2106 debug_list = SYSCTL_CHILDREN(debug_node); 2107 2108 SYSCTL_ADD_UINT(ctx, debug_list, 2109 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2110 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2111 2112 SYSCTL_ADD_UINT(ctx, debug_list, 2113 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2114 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2115 2116 SYSCTL_ADD_PROC(ctx, debug_list, 2117 OID_AUTO, "dump_debug_data", 2118 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2119 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2120 2121 SYSCTL_ADD_PROC(ctx, debug_list, 2122 OID_AUTO, "do_pf_reset", 2123 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2124 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2125 2126 SYSCTL_ADD_PROC(ctx, debug_list, 2127 OID_AUTO, "do_core_reset", 2128 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2129 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2130 2131 SYSCTL_ADD_PROC(ctx, debug_list, 2132 OID_AUTO, "do_global_reset", 2133 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2134 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2135 2136 SYSCTL_ADD_PROC(ctx, debug_list, 2137 OID_AUTO, "queue_interrupt_table", 2138 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2139 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2140 } 2141 2142 void 2143 ixl_add_device_sysctls(struct ixl_pf *pf) 2144 { 2145 device_t dev = pf->dev; 2146 struct i40e_hw *hw = &pf->hw; 2147 2148 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2149 struct sysctl_oid_list *ctx_list = 2150 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2151 2152 struct sysctl_oid *debug_node; 2153 struct sysctl_oid_list *debug_list; 2154 2155 struct sysctl_oid *fec_node; 2156 struct sysctl_oid_list *fec_list; 2157 2158 /* Set up sysctls */ 2159 SYSCTL_ADD_PROC(ctx, ctx_list, 2160 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2161 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 2162 2163 SYSCTL_ADD_PROC(ctx, ctx_list, 2164 OID_AUTO, "advertise_speed", 2165 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2166 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); 2167 2168 SYSCTL_ADD_PROC(ctx, ctx_list, 2169 OID_AUTO, "supported_speeds", 2170 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2171 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); 2172 2173 SYSCTL_ADD_PROC(ctx, ctx_list, 2174 OID_AUTO, "current_speed", 2175 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2176 ixl_sysctl_current_speed, "A", "Current Port Speed"); 2177 2178 SYSCTL_ADD_PROC(ctx, ctx_list, 2179 OID_AUTO, "fw_version", 2180 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2181 ixl_sysctl_show_fw, "A", "Firmware version"); 2182 2183 SYSCTL_ADD_PROC(ctx, ctx_list, 2184 OID_AUTO, "unallocated_queues", 2185 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2186 ixl_sysctl_unallocated_queues, "I", 2187 "Queues not allocated to a PF or VF"); 2188 2189 SYSCTL_ADD_PROC(ctx, ctx_list, 2190 OID_AUTO, "tx_itr", 2191 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2192 ixl_sysctl_pf_tx_itr, "I", 2193 "Immediately set TX ITR value for all queues"); 2194 2195 SYSCTL_ADD_PROC(ctx, ctx_list, 2196 OID_AUTO, "rx_itr", 2197 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2198 
ixl_sysctl_pf_rx_itr, "I", 2199 "Immediately set RX ITR value for all queues"); 2200 2201 SYSCTL_ADD_INT(ctx, ctx_list, 2202 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 2203 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 2204 2205 SYSCTL_ADD_INT(ctx, ctx_list, 2206 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 2207 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 2208 2209 /* Add FEC sysctls for 25G adapters */ 2210 if (i40e_is_25G_device(hw->device_id)) { 2211 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2212 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2213 "FEC Sysctls"); 2214 fec_list = SYSCTL_CHILDREN(fec_node); 2215 2216 SYSCTL_ADD_PROC(ctx, fec_list, 2217 OID_AUTO, "fc_ability", 2218 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2219 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 2220 2221 SYSCTL_ADD_PROC(ctx, fec_list, 2222 OID_AUTO, "rs_ability", 2223 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2224 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 2225 2226 SYSCTL_ADD_PROC(ctx, fec_list, 2227 OID_AUTO, "fc_requested", 2228 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2229 ixl_sysctl_fec_fc_request, "I", 2230 "FC FEC mode requested on link"); 2231 2232 SYSCTL_ADD_PROC(ctx, fec_list, 2233 OID_AUTO, "rs_requested", 2234 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2235 ixl_sysctl_fec_rs_request, "I", 2236 "RS FEC mode requested on link"); 2237 2238 SYSCTL_ADD_PROC(ctx, fec_list, 2239 OID_AUTO, "auto_fec_enabled", 2240 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2241 ixl_sysctl_fec_auto_enable, "I", 2242 "Let FW decide FEC ability/request modes"); 2243 } 2244 2245 SYSCTL_ADD_PROC(ctx, ctx_list, 2246 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2247 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 2248 2249 /* Add sysctls meant to print debug information, but don't list them 2250 * in "sysctl -a" output. 
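 * This list is a superset of the one registered in * ixl_add_sysctls_recovery_mode(); the CTLFLAG_SKIP note there about * reading the entries by their full names applies here as well.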
*/ 2251 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2252 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2253 "Debug Sysctls"); 2254 debug_list = SYSCTL_CHILDREN(debug_node); 2255 2256 SYSCTL_ADD_UINT(ctx, debug_list, 2257 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2258 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2259 2260 SYSCTL_ADD_UINT(ctx, debug_list, 2261 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2262 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2263 2264 SYSCTL_ADD_PROC(ctx, debug_list, 2265 OID_AUTO, "link_status", 2266 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2267 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); 2268 2269 SYSCTL_ADD_PROC(ctx, debug_list, 2270 OID_AUTO, "phy_abilities", 2271 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2272 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); 2273 2274 SYSCTL_ADD_PROC(ctx, debug_list, 2275 OID_AUTO, "filter_list", 2276 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2277 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); 2278 2279 SYSCTL_ADD_PROC(ctx, debug_list, 2280 OID_AUTO, "hw_res_alloc", 2281 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2282 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); 2283 2284 SYSCTL_ADD_PROC(ctx, debug_list, 2285 OID_AUTO, "switch_config", 2286 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2287 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); 2288 2289 SYSCTL_ADD_PROC(ctx, debug_list, 2290 OID_AUTO, "rss_key", 2291 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2292 pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); 2293 2294 SYSCTL_ADD_PROC(ctx, debug_list, 2295 OID_AUTO, "rss_lut", 2296 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2297 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); 2298 2299 SYSCTL_ADD_PROC(ctx, debug_list, 2300 OID_AUTO, "rss_hena", 2301 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2302 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); 2303 2304 SYSCTL_ADD_PROC(ctx, debug_list, 2305 OID_AUTO, "disable_fw_link_management", 2306 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2307 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); 2308 2309 SYSCTL_ADD_PROC(ctx, debug_list, 2310 OID_AUTO, "dump_debug_data", 2311 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2312 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2313 2314 SYSCTL_ADD_PROC(ctx, debug_list, 2315 OID_AUTO, "do_pf_reset", 2316 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2317 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2318 2319 SYSCTL_ADD_PROC(ctx, debug_list, 2320 OID_AUTO, "do_core_reset", 2321 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2322 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2323 2324 SYSCTL_ADD_PROC(ctx, debug_list, 2325 OID_AUTO, "do_global_reset", 2326 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2327 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2328 2329 SYSCTL_ADD_PROC(ctx, debug_list, 2330 OID_AUTO, "queue_interrupt_table", 2331 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2332 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2333 2334 if (pf->has_i2c) { 2335 SYSCTL_ADD_PROC(ctx, debug_list, 2336 OID_AUTO, "read_i2c_byte", 2337 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2338 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); 2339 2340 SYSCTL_ADD_PROC(ctx, 
debug_list, 2341 OID_AUTO, "write_i2c_byte", 2342 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2343 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); 2344 2345 SYSCTL_ADD_PROC(ctx, debug_list, 2346 OID_AUTO, "read_i2c_diag_data", 2347 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2348 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); 2349 } 2350 } 2351 2352 /* 2353 * Primarily for finding out how many queues can be assigned to VFs, 2354 * at runtime. 2355 */ 2356 static int 2357 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) 2358 { 2359 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2360 int queues; 2361 2362 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); 2363 2364 return sysctl_handle_int(oidp, NULL, queues, req); 2365 } 2366 2367 static const char * 2368 ixl_link_speed_string(enum i40e_aq_link_speed link_speed) 2369 { 2370 const char * link_speed_str[] = { 2371 "Unknown", 2372 "100 Mbps", 2373 "1 Gbps", 2374 "10 Gbps", 2375 "40 Gbps", 2376 "20 Gbps", 2377 "25 Gbps", 2378 }; 2379 int index; 2380 2381 switch (link_speed) { 2382 case I40E_LINK_SPEED_100MB: 2383 index = 1; 2384 break; 2385 case I40E_LINK_SPEED_1GB: 2386 index = 2; 2387 break; 2388 case I40E_LINK_SPEED_10GB: 2389 index = 3; 2390 break; 2391 case I40E_LINK_SPEED_40GB: 2392 index = 4; 2393 break; 2394 case I40E_LINK_SPEED_20GB: 2395 index = 5; 2396 break; 2397 case I40E_LINK_SPEED_25GB: 2398 index = 6; 2399 break; 2400 case I40E_LINK_SPEED_UNKNOWN: 2401 default: 2402 index = 0; 2403 break; 2404 } 2405 2406 return (link_speed_str[index]); 2407 } 2408 2409 int 2410 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2411 { 2412 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2413 struct i40e_hw *hw = &pf->hw; 2414 int error = 0; 2415 2416 ixl_update_link_status(pf); 2417 2418 error = sysctl_handle_string(oidp, 2419 __DECONST(void *, 2420 ixl_link_speed_string(hw->phy.link_info.link_speed)), 2421 8, req); 2422 2423 return (error); 2424 } 2425 2426 /* 2427 * Converts 8-bit speeds value to and from sysctl flags and 2428 * Admin Queue flags. 2429 */ 2430 static u8 2431 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) 2432 { 2433 #define SPEED_MAP_SIZE 6 2434 static u16 speedmap[SPEED_MAP_SIZE] = { 2435 (I40E_LINK_SPEED_100MB | (0x1 << 8)), 2436 (I40E_LINK_SPEED_1GB | (0x2 << 8)), 2437 (I40E_LINK_SPEED_10GB | (0x4 << 8)), 2438 (I40E_LINK_SPEED_20GB | (0x8 << 8)), 2439 (I40E_LINK_SPEED_25GB | (0x10 << 8)), 2440 (I40E_LINK_SPEED_40GB | (0x20 << 8)) 2441 }; 2442 u8 retval = 0; 2443 2444 for (int i = 0; i < SPEED_MAP_SIZE; i++) { 2445 if (to_aq) 2446 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; 2447 else 2448 retval |= (speeds & speedmap[i]) ? 
(speedmap[i] >> 8) : 0; 2449 } 2450 2451 return (retval); 2452 } 2453 2454 int 2455 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) 2456 { 2457 struct i40e_hw *hw = &pf->hw; 2458 device_t dev = pf->dev; 2459 struct i40e_aq_get_phy_abilities_resp abilities; 2460 struct i40e_aq_set_phy_config config; 2461 enum i40e_status_code aq_error = 0; 2462 2463 /* Get current capability information */ 2464 aq_error = i40e_aq_get_phy_capabilities(hw, 2465 FALSE, FALSE, &abilities, NULL); 2466 if (aq_error) { 2467 device_printf(dev, 2468 "%s: Error getting phy capabilities %d," 2469 " aq error: %d\n", __func__, aq_error, 2470 hw->aq.asq_last_status); 2471 return (EIO); 2472 } 2473 2474 /* Prepare new config */ 2475 bzero(&config, sizeof(config)); 2476 if (from_aq) 2477 config.link_speed = speeds; 2478 else 2479 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); 2480 config.phy_type = abilities.phy_type; 2481 config.phy_type_ext = abilities.phy_type_ext; 2482 config.abilities = abilities.abilities 2483 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 2484 config.eee_capability = abilities.eee_capability; 2485 config.eeer = abilities.eeer_val; 2486 config.low_power_ctrl = abilities.d3_lpan; 2487 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 2488 & I40E_AQ_PHY_FEC_CONFIG_MASK; 2489 2490 /* Do aq command & restart link */ 2491 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 2492 if (aq_error) { 2493 device_printf(dev, 2494 "%s: Error setting new phy config %d," 2495 " aq error: %d\n", __func__, aq_error, 2496 hw->aq.asq_last_status); 2497 return (EIO); 2498 } 2499 2500 return (0); 2501 } 2502 2503 /* 2504 ** Supported link speeds 2505 ** Flags: 2506 ** 0x1 - 100 Mb 2507 ** 0x2 - 1G 2508 ** 0x4 - 10G 2509 ** 0x8 - 20G 2510 ** 0x10 - 25G 2511 ** 0x20 - 40G 2512 */ 2513 static int 2514 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) 2515 { 2516 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2517 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 2518 2519 return sysctl_handle_int(oidp, NULL, supported, req); 2520 } 2521 2522 /* 2523 ** Control link advertise speed: 2524 ** Flags: 2525 ** 0x1 - advertise 100 Mb 2526 ** 0x2 - advertise 1G 2527 ** 0x4 - advertise 10G 2528 ** 0x8 - advertise 20G 2529 ** 0x10 - advertise 25G 2530 ** 0x20 - advertise 40G 2531 ** 2532 ** Set to 0 to disable link 2533 */ 2534 int 2535 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) 2536 { 2537 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2538 device_t dev = pf->dev; 2539 u8 converted_speeds; 2540 int requested_ls = 0; 2541 int error = 0; 2542 2543 /* Read in new mode */ 2544 requested_ls = pf->advertised_speed; 2545 error = sysctl_handle_int(oidp, &requested_ls, 0, req); 2546 if ((error) || (req->newptr == NULL)) 2547 return (error); 2548 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 2549 device_printf(dev, "Interface is currently in FW recovery mode. 
" 2550 "Setting advertise speed not supported\n"); 2551 return (EINVAL); 2552 } 2553 2554 /* Error out if bits outside of possible flag range are set */ 2555 if ((requested_ls & ~((u8)0x3F)) != 0) { 2556 device_printf(dev, "Input advertised speed out of range; " 2557 "valid flags are: 0x%02x\n", 2558 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2559 return (EINVAL); 2560 } 2561 2562 /* Check if adapter supports input value */ 2563 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); 2564 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { 2565 device_printf(dev, "Invalid advertised speed; " 2566 "valid flags are: 0x%02x\n", 2567 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2568 return (EINVAL); 2569 } 2570 2571 error = ixl_set_advertised_speeds(pf, requested_ls, false); 2572 if (error) 2573 return (error); 2574 2575 pf->advertised_speed = requested_ls; 2576 ixl_update_link_status(pf); 2577 return (0); 2578 } 2579 2580 /* 2581 * Input: bitmap of enum i40e_aq_link_speed 2582 */ 2583 u64 2584 ixl_max_aq_speed_to_value(u8 link_speeds) 2585 { 2586 if (link_speeds & I40E_LINK_SPEED_40GB) 2587 return IF_Gbps(40); 2588 if (link_speeds & I40E_LINK_SPEED_25GB) 2589 return IF_Gbps(25); 2590 if (link_speeds & I40E_LINK_SPEED_20GB) 2591 return IF_Gbps(20); 2592 if (link_speeds & I40E_LINK_SPEED_10GB) 2593 return IF_Gbps(10); 2594 if (link_speeds & I40E_LINK_SPEED_1GB) 2595 return IF_Gbps(1); 2596 if (link_speeds & I40E_LINK_SPEED_100MB) 2597 return IF_Mbps(100); 2598 else 2599 /* Minimum supported link speed */ 2600 return IF_Mbps(100); 2601 } 2602 2603 /* 2604 ** Get the width and transaction speed of 2605 ** the bus this adapter is plugged into. 2606 */ 2607 void 2608 ixl_get_bus_info(struct ixl_pf *pf) 2609 { 2610 struct i40e_hw *hw = &pf->hw; 2611 device_t dev = pf->dev; 2612 u16 link; 2613 u32 offset, num_ports; 2614 u64 max_speed; 2615 2616 /* Some devices don't use PCIE */ 2617 if (hw->mac.type == I40E_MAC_X722) 2618 return; 2619 2620 /* Read PCI Express Capabilities Link Status Register */ 2621 pci_find_cap(dev, PCIY_EXPRESS, &offset); 2622 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2623 2624 /* Fill out hw struct with PCIE info */ 2625 i40e_set_pci_config_data(hw, link); 2626 2627 /* Use info to print out bandwidth messages */ 2628 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 2629 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 2630 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 2631 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 2632 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 2633 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 2634 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 2635 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 2636 ("Unknown")); 2637 2638 /* 2639 * If adapter is in slot with maximum supported speed, 2640 * no warning message needs to be printed out. 
2641 */ 2642 if (hw->bus.speed >= i40e_bus_speed_8000 2643 && hw->bus.width >= i40e_bus_width_pcie_x8) 2644 return; 2645 2646 num_ports = bitcount32(hw->func_caps.valid_functions); 2647 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; 2648 2649 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { 2650 device_printf(dev, "PCI-Express bandwidth available" 2651 " for this device may be insufficient for" 2652 " optimal performance.\n"); 2653 device_printf(dev, "Please move the device to a different" 2654 " PCI-e link with more lanes and/or higher" 2655 " transfer rate.\n"); 2656 } 2657 } 2658 2659 static int 2660 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2661 { 2662 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2663 struct i40e_hw *hw = &pf->hw; 2664 struct sbuf *sbuf; 2665 2666 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2667 ixl_nvm_version_str(hw, sbuf); 2668 sbuf_finish(sbuf); 2669 sbuf_delete(sbuf); 2670 2671 return (0); 2672 } 2673 2674 void 2675 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) 2676 { 2677 u8 nvma_ptr = nvma->config & 0xFF; 2678 u8 nvma_flags = (nvma->config & 0xF00) >> 8; 2679 const char * cmd_str; 2680 2681 switch (nvma->command) { 2682 case I40E_NVM_READ: 2683 if (nvma_ptr == 0xF && nvma_flags == 0xF && 2684 nvma->offset == 0 && nvma->data_size == 1) { 2685 device_printf(dev, "NVMUPD: Get Driver Status Command\n"); 2686 return; 2687 } 2688 cmd_str = "READ "; 2689 break; 2690 case I40E_NVM_WRITE: 2691 cmd_str = "WRITE"; 2692 break; 2693 default: 2694 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command); 2695 return; 2696 } 2697 device_printf(dev, 2698 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n", 2699 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size); 2700 } 2701 2702 int 2703 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) 2704 { 2705 struct i40e_hw *hw = &pf->hw; 2706 struct i40e_nvm_access *nvma; 2707 device_t dev = pf->dev; 2708 enum i40e_status_code status = 0; 2709 size_t nvma_size, ifd_len, exp_len; 2710 int err, perrno; 2711 2712 DEBUGFUNC("ixl_handle_nvmupd_cmd"); 2713 2714 /* Sanity checks */ 2715 nvma_size = sizeof(struct i40e_nvm_access); 2716 ifd_len = ifd->ifd_len; 2717 2718 if (ifd_len < nvma_size || 2719 ifd->ifd_data == NULL) { 2720 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", 2721 __func__); 2722 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", 2723 __func__, ifd_len, nvma_size); 2724 device_printf(dev, "%s: data pointer: %p\n", __func__, 2725 ifd->ifd_data); 2726 return (EINVAL); 2727 } 2728 2729 nvma = malloc(ifd_len, M_IXL, M_WAITOK); 2730 err = copyin(ifd->ifd_data, nvma, ifd_len); 2731 if (err) { 2732 device_printf(dev, "%s: Cannot get request from user space\n", 2733 __func__); 2734 free(nvma, M_IXL); 2735 return (err); 2736 } 2737 2738 if (pf->dbg_mask & IXL_DBG_NVMUPD) 2739 ixl_print_nvm_cmd(dev, nvma); 2740 2741 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) { 2742 int count = 0; 2743 while (count++ < 100) { 2744 i40e_msec_delay(100); 2745 if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING)) 2746 break; 2747 } 2748 } 2749 2750 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) { 2751 device_printf(dev, 2752 "%s: timeout waiting for EMP reset to finish\n", 2753 __func__); 2754 free(nvma, M_IXL); 2755 return (-EBUSY); 2756 } 2757 2758 if (nvma->data_size < 1 || nvma->data_size > 4096) { 2759 device_printf(dev, 2760 "%s: invalid request, data size not in supported range\n", 
2761 __func__); 2762 free(nvma, M_IXL); 2763 return (EINVAL); 2764 } 2765 2766 /* 2767 * Older versions of the NVM update tool don't set ifd_len to the size 2768 * of the entire buffer passed to the ioctl. Check the data_size field 2769 * in the contained i40e_nvm_access struct and ensure everything is 2770 * copied in from userspace. 2771 */ 2772 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ 2773 2774 if (ifd_len < exp_len) { 2775 ifd_len = exp_len; 2776 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK); 2777 err = copyin(ifd->ifd_data, nvma, ifd_len); 2778 if (err) { 2779 device_printf(dev, "%s: Cannot get request from user space\n", 2780 __func__); 2781 free(nvma, M_IXL); 2782 return (err); 2783 } 2784 } 2785 2786 // TODO: Might need a different lock here 2787 // IXL_PF_LOCK(pf); 2788 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); 2789 // IXL_PF_UNLOCK(pf); 2790 2791 err = copyout(nvma, ifd->ifd_data, ifd_len); 2792 free(nvma, M_IXL); 2793 if (err) { 2794 device_printf(dev, "%s: Cannot return data to user space\n", 2795 __func__); 2796 return (err); 2797 } 2798 2799 /* Let the nvmupdate report errors, show them only when debug is enabled */ 2800 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) 2801 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", 2802 i40e_stat_str(hw, status), perrno); 2803 2804 /* 2805 * -EPERM is actually ERESTART, which the kernel interprets as meaning it 2806 * needs to run this ioctl again. So use -EACCES for -EPERM instead. 2807 */ 2808 if (perrno == -EPERM) 2809 return (-EACCES); 2810 else 2811 return (perrno); 2812 } 2813 2814 int 2815 ixl_find_i2c_interface(struct ixl_pf *pf) 2816 { 2817 struct i40e_hw *hw = &pf->hw; 2818 bool i2c_en, port_matched; 2819 u32 reg; 2820 2821 for (int i = 0; i < 4; i++) { 2822 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); 2823 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); 2824 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) 2825 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) 2826 & BIT(hw->port); 2827 if (i2c_en && port_matched) 2828 return (i); 2829 } 2830 2831 return (-1); 2832 } 2833 2834 static char * 2835 ixl_phy_type_string(u32 bit_pos, bool ext) 2836 { 2837 static char * phy_types_str[32] = { 2838 "SGMII", 2839 "1000BASE-KX", 2840 "10GBASE-KX4", 2841 "10GBASE-KR", 2842 "40GBASE-KR4", 2843 "XAUI", 2844 "XFI", 2845 "SFI", 2846 "XLAUI", 2847 "XLPPI", 2848 "40GBASE-CR4", 2849 "10GBASE-CR1", 2850 "SFP+ Active DA", 2851 "QSFP+ Active DA", 2852 "Reserved (14)", 2853 "Reserved (15)", 2854 "Reserved (16)", 2855 "100BASE-TX", 2856 "1000BASE-T", 2857 "10GBASE-T", 2858 "10GBASE-SR", 2859 "10GBASE-LR", 2860 "10GBASE-SFP+Cu", 2861 "10GBASE-CR1", 2862 "40GBASE-CR4", 2863 "40GBASE-SR4", 2864 "40GBASE-LR4", 2865 "1000BASE-SX", 2866 "1000BASE-LX", 2867 "1000BASE-T Optical", 2868 "20GBASE-KR2", 2869 "Reserved (31)" 2870 }; 2871 static char * ext_phy_types_str[8] = { 2872 "25GBASE-KR", 2873 "25GBASE-CR", 2874 "25GBASE-SR", 2875 "25GBASE-LR", 2876 "25GBASE-AOC", 2877 "25GBASE-ACC", 2878 "Reserved (6)", 2879 "Reserved (7)" 2880 }; 2881 2882 if (ext && bit_pos > 7) return "Invalid_Ext"; 2883 if (bit_pos > 31) return "Invalid"; 2884 2885 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; 2886 } 2887 2888 /* TODO: ERJ: I don't think this is necessary anymore. 
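 * (It still backs the debug "link_status" sysctl below: unlike the cached * hw->phy.link_info, it issues a fresh get_link_status admin queue command * and reports the raw descriptor fields.)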
*/ 2889 int 2890 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) 2891 { 2892 device_t dev = pf->dev; 2893 struct i40e_hw *hw = &pf->hw; 2894 struct i40e_aq_desc desc; 2895 enum i40e_status_code status; 2896 2897 struct i40e_aqc_get_link_status *aq_link_status = 2898 (struct i40e_aqc_get_link_status *)&desc.params.raw; 2899 2900 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 2901 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); 2902 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 2903 if (status) { 2904 device_printf(dev, 2905 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", 2906 __func__, i40e_stat_str(hw, status), 2907 i40e_aq_str(hw, hw->aq.asq_last_status)); 2908 return (EIO); 2909 } 2910 2911 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); 2912 return (0); 2913 } 2914 2915 static char * 2916 ixl_phy_type_string_ls(u8 val) 2917 { 2918 if (val >= 0x1F) 2919 return ixl_phy_type_string(val - 0x1F, true); 2920 else 2921 return ixl_phy_type_string(val, false); 2922 } 2923 2924 static int 2925 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) 2926 { 2927 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2928 device_t dev = pf->dev; 2929 struct sbuf *buf; 2930 int error = 0; 2931 2932 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2933 if (!buf) { 2934 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 2935 return (ENOMEM); 2936 } 2937 2938 struct i40e_aqc_get_link_status link_status; 2939 error = ixl_aq_get_link_status(pf, &link_status); 2940 if (error) { 2941 sbuf_delete(buf); 2942 return (error); 2943 } 2944 2945 sbuf_printf(buf, "\n" 2946 "PHY Type : 0x%02x<%s>\n" 2947 "Speed : 0x%02x\n" 2948 "Link info: 0x%02x\n" 2949 "AN info : 0x%02x\n" 2950 "Ext info : 0x%02x\n" 2951 "Loopback : 0x%02x\n" 2952 "Max Frame: %d\n" 2953 "Config : 0x%02x\n" 2954 "Power : 0x%02x", 2955 link_status.phy_type, 2956 ixl_phy_type_string_ls(link_status.phy_type), 2957 link_status.link_speed, 2958 link_status.link_info, 2959 link_status.an_info, 2960 link_status.ext_info, 2961 link_status.loopback, 2962 link_status.max_frame_size, 2963 link_status.config, 2964 link_status.power_desc); 2965 2966 error = sbuf_finish(buf); 2967 if (error) 2968 device_printf(dev, "Error finishing sbuf: %d\n", error); 2969 2970 sbuf_delete(buf); 2971 return (error); 2972 } 2973 2974 static int 2975 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 2976 { 2977 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2978 struct i40e_hw *hw = &pf->hw; 2979 device_t dev = pf->dev; 2980 enum i40e_status_code status; 2981 struct i40e_aq_get_phy_abilities_resp abilities; 2982 struct sbuf *buf; 2983 int error = 0; 2984 2985 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2986 if (!buf) { 2987 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 2988 return (ENOMEM); 2989 } 2990 2991 status = i40e_aq_get_phy_capabilities(hw, 2992 FALSE, FALSE, &abilities, NULL); 2993 if (status) { 2994 device_printf(dev, 2995 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 2996 __func__, i40e_stat_str(hw, status), 2997 i40e_aq_str(hw, hw->aq.asq_last_status)); 2998 sbuf_delete(buf); 2999 return (EIO); 3000 } 3001 3002 sbuf_printf(buf, "\n" 3003 "PHY Type : %08x", 3004 abilities.phy_type); 3005 3006 if (abilities.phy_type != 0) { 3007 sbuf_printf(buf, "<"); 3008 for (int i = 0; i < 32; i++) 3009 if ((1 << i) & abilities.phy_type) 3010 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); 3011 sbuf_printf(buf, ">"); 
3012 } 3013 3014 sbuf_printf(buf, "\nPHY Ext : %02x", 3015 abilities.phy_type_ext); 3016 3017 if (abilities.phy_type_ext != 0) { 3018 sbuf_printf(buf, "<"); 3019 for (int i = 0; i < 4; i++) 3020 if ((1 << i) & abilities.phy_type_ext) 3021 sbuf_printf(buf, "%s,", 3022 ixl_phy_type_string(i, true)); 3023 sbuf_printf(buf, ">"); 3024 } 3025 3026 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 3027 if (abilities.link_speed != 0) { 3028 u8 link_speed; 3029 sbuf_printf(buf, " <"); 3030 for (int i = 0; i < 8; i++) { 3031 link_speed = (1 << i) & abilities.link_speed; 3032 if (link_speed) 3033 sbuf_printf(buf, "%s, ", 3034 ixl_link_speed_string(link_speed)); 3035 } 3036 sbuf_printf(buf, ">"); 3037 } 3038 3039 sbuf_printf(buf, "\n" 3040 "Abilities: %02x\n" 3041 "EEE cap : %04x\n" 3042 "EEER reg : %08x\n" 3043 "D3 Lpan : %02x\n" 3044 "ID : %02x %02x %02x %02x\n" 3045 "ModType : %02x %02x %02x\n" 3046 "ModType E: %01x\n" 3047 "FEC Cfg : %02x\n" 3048 "Ext CC : %02x", 3049 abilities.abilities, abilities.eee_capability, 3050 abilities.eeer_val, abilities.d3_lpan, 3051 abilities.phy_id[0], abilities.phy_id[1], 3052 abilities.phy_id[2], abilities.phy_id[3], 3053 abilities.module_type[0], abilities.module_type[1], 3054 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 3055 abilities.fec_cfg_curr_mod_ext_info & 0x1F, 3056 abilities.ext_comp_code); 3057 3058 error = sbuf_finish(buf); 3059 if (error) 3060 device_printf(dev, "Error finishing sbuf: %d\n", error); 3061 3062 sbuf_delete(buf); 3063 return (error); 3064 } 3065 3066 static int 3067 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 3068 { 3069 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3070 struct ixl_vsi *vsi = &pf->vsi; 3071 struct ixl_mac_filter *f; 3072 device_t dev = pf->dev; 3073 int error = 0, ftl_len = 0, ftl_counter = 0; 3074 3075 struct sbuf *buf; 3076 3077 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3078 if (!buf) { 3079 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3080 return (ENOMEM); 3081 } 3082 3083 sbuf_printf(buf, "\n"); 3084 3085 /* Print MAC filters */ 3086 sbuf_printf(buf, "PF Filters:\n"); 3087 SLIST_FOREACH(f, &vsi->ftl, next) 3088 ftl_len++; 3089 3090 if (ftl_len < 1) 3091 sbuf_printf(buf, "(none)\n"); 3092 else { 3093 SLIST_FOREACH(f, &vsi->ftl, next) { 3094 sbuf_printf(buf, 3095 MAC_FORMAT ", vlan %4d, flags %#06x", 3096 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3097 /* don't print '\n' for last entry */ 3098 if (++ftl_counter != ftl_len) 3099 sbuf_printf(buf, "\n"); 3100 } 3101 } 3102 3103 #ifdef PCI_IOV 3104 /* TODO: Give each VF its own filter list sysctl */ 3105 struct ixl_vf *vf; 3106 if (pf->num_vfs > 0) { 3107 sbuf_printf(buf, "\n\n"); 3108 for (int i = 0; i < pf->num_vfs; i++) { 3109 vf = &pf->vfs[i]; 3110 if (!(vf->vf_flags & VF_FLAG_ENABLED)) 3111 continue; 3112 3113 vsi = &vf->vsi; 3114 ftl_len = 0, ftl_counter = 0; 3115 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num); 3116 SLIST_FOREACH(f, &vsi->ftl, next) 3117 ftl_len++; 3118 3119 if (ftl_len < 1) 3120 sbuf_printf(buf, "(none)\n"); 3121 else { 3122 SLIST_FOREACH(f, &vsi->ftl, next) { 3123 sbuf_printf(buf, 3124 MAC_FORMAT ", vlan %4d, flags %#06x\n", 3125 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3126 } 3127 } 3128 } 3129 } 3130 #endif 3131 3132 error = sbuf_finish(buf); 3133 if (error) 3134 device_printf(dev, "Error finishing sbuf: %d\n", error); 3135 sbuf_delete(buf); 3136 3137 return (error); 3138 } 3139 3140 #define IXL_SW_RES_SIZE 0x14 3141 int 3142 ixl_res_alloc_cmp(const void *a, 
const void *b) 3143 { 3144 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; 3145 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; 3146 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; 3147 3148 return ((int)one->resource_type - (int)two->resource_type); 3149 } 3150 3151 /* 3152 * Longest string length: 25 3153 */ 3154 const char * 3155 ixl_switch_res_type_string(u8 type) 3156 { 3157 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = { 3158 "VEB", 3159 "VSI", 3160 "Perfect Match MAC address", 3161 "S-tag", 3162 "(Reserved)", 3163 "Multicast hash entry", 3164 "Unicast hash entry", 3165 "VLAN", 3166 "VSI List entry", 3167 "(Reserved)", 3168 "VLAN Statistic Pool", 3169 "Mirror Rule", 3170 "Queue Set", 3171 "Inner VLAN Forward filter", 3172 "(Reserved)", 3173 "Inner MAC", 3174 "IP", 3175 "GRE/VN1 Key", 3176 "VN2 Key", 3177 "Tunneling Port" 3178 }; 3179 3180 if (type < IXL_SW_RES_SIZE) 3181 return ixl_switch_res_type_strings[type]; 3182 else 3183 return "(Reserved)"; 3184 } 3185 3186 static int 3187 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) 3188 { 3189 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3190 struct i40e_hw *hw = &pf->hw; 3191 device_t dev = pf->dev; 3192 struct sbuf *buf; 3193 enum i40e_status_code status; 3194 int error = 0; 3195 3196 u8 num_entries; 3197 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; 3198 3199 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3200 if (!buf) { 3201 device_printf(dev, "Could not allocate sbuf for output.\n"); 3202 return (ENOMEM); 3203 } 3204 3205 bzero(resp, sizeof(resp)); 3206 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, 3207 resp, 3208 IXL_SW_RES_SIZE, 3209 NULL); 3210 if (status) { 3211 device_printf(dev, 3212 "%s: get_switch_resource_alloc() error %s, aq error %s\n", 3213 __func__, i40e_stat_str(hw, status), 3214 i40e_aq_str(hw, hw->aq.asq_last_status)); 3215 sbuf_delete(buf); 3216 return (error); 3217 } 3218 3219 /* Sort entries by type for display */ 3220 qsort(resp, num_entries, 3221 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), 3222 &ixl_res_alloc_cmp); 3223 3224 sbuf_cat(buf, "\n"); 3225 sbuf_printf(buf, "# of entries: %d\n", num_entries); 3226 sbuf_printf(buf, 3227 " Type | Guaranteed | Total | Used | Un-allocated\n" 3228 " | (this) | (all) | (this) | (all) \n"); 3229 for (int i = 0; i < num_entries; i++) { 3230 sbuf_printf(buf, 3231 "%25s | %10d %5d %6d %12d", 3232 ixl_switch_res_type_string(resp[i].resource_type), 3233 resp[i].guaranteed, 3234 resp[i].total, 3235 resp[i].used, 3236 resp[i].total_unalloced); 3237 if (i < num_entries - 1) 3238 sbuf_cat(buf, "\n"); 3239 } 3240 3241 error = sbuf_finish(buf); 3242 if (error) 3243 device_printf(dev, "Error finishing sbuf: %d\n", error); 3244 3245 sbuf_delete(buf); 3246 return (error); 3247 } 3248 3249 enum ixl_sw_seid_offset { 3250 IXL_SW_SEID_EMP = 1, 3251 IXL_SW_SEID_MAC_START = 2, 3252 IXL_SW_SEID_MAC_END = 5, 3253 IXL_SW_SEID_PF_START = 16, 3254 IXL_SW_SEID_PF_END = 31, 3255 IXL_SW_SEID_VF_START = 32, 3256 IXL_SW_SEID_VF_END = 159, 3257 }; 3258 3259 /* 3260 * Caller must init and delete sbuf; this function will clear and 3261 * finish it for caller. 3262 * 3263 * Note: The SEID argument only applies for elements defined by FW at 3264 * power-on; these include the EMP, Ports, PFs and VFs. 
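 * Example mappings implied by the enum above: SEID 1 -> "EMP", * SEID 2 -> "MAC  0", SEID 16 -> "PF   0", SEID 32 -> "VF   0".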
3265 */ 3266 static char * 3267 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid) 3268 { 3269 sbuf_clear(s); 3270 3271 /* If SEID is in certain ranges, then we can infer the 3272 * mapping of SEID to switch element. 3273 */ 3274 if (seid == IXL_SW_SEID_EMP) { 3275 sbuf_cat(s, "EMP"); 3276 goto out; 3277 } else if (seid >= IXL_SW_SEID_MAC_START && 3278 seid <= IXL_SW_SEID_MAC_END) { 3279 sbuf_printf(s, "MAC %2d", 3280 seid - IXL_SW_SEID_MAC_START); 3281 goto out; 3282 } else if (seid >= IXL_SW_SEID_PF_START && 3283 seid <= IXL_SW_SEID_PF_END) { 3284 sbuf_printf(s, "PF %3d", 3285 seid - IXL_SW_SEID_PF_START); 3286 goto out; 3287 } else if (seid >= IXL_SW_SEID_VF_START && 3288 seid <= IXL_SW_SEID_VF_END) { 3289 sbuf_printf(s, "VF %3d", 3290 seid - IXL_SW_SEID_VF_START); 3291 goto out; 3292 } 3293 3294 switch (element_type) { 3295 case I40E_AQ_SW_ELEM_TYPE_BMC: 3296 sbuf_cat(s, "BMC"); 3297 break; 3298 case I40E_AQ_SW_ELEM_TYPE_PV: 3299 sbuf_cat(s, "PV"); 3300 break; 3301 case I40E_AQ_SW_ELEM_TYPE_VEB: 3302 sbuf_cat(s, "VEB"); 3303 break; 3304 case I40E_AQ_SW_ELEM_TYPE_PA: 3305 sbuf_cat(s, "PA"); 3306 break; 3307 case I40E_AQ_SW_ELEM_TYPE_VSI: 3308 sbuf_printf(s, "VSI"); 3309 break; 3310 default: 3311 sbuf_cat(s, "?"); 3312 break; 3313 } 3314 3315 out: 3316 sbuf_finish(s); 3317 return sbuf_data(s); 3318 } 3319 3320 static int 3321 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b) 3322 { 3323 const struct i40e_aqc_switch_config_element_resp *one, *two; 3324 one = (const struct i40e_aqc_switch_config_element_resp *)a; 3325 two = (const struct i40e_aqc_switch_config_element_resp *)b; 3326 3327 return ((int)one->seid - (int)two->seid); 3328 } 3329 3330 static int 3331 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) 3332 { 3333 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3334 struct i40e_hw *hw = &pf->hw; 3335 device_t dev = pf->dev; 3336 struct sbuf *buf; 3337 struct sbuf *nmbuf; 3338 enum i40e_status_code status; 3339 int error = 0; 3340 u16 next = 0; 3341 u8 aq_buf[I40E_AQ_LARGE_BUF]; 3342 3343 struct i40e_aqc_switch_config_element_resp *elem; 3344 struct i40e_aqc_get_switch_config_resp *sw_config; 3345 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 3346 3347 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3348 if (!buf) { 3349 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3350 return (ENOMEM); 3351 } 3352 3353 status = i40e_aq_get_switch_config(hw, sw_config, 3354 sizeof(aq_buf), &next, NULL); 3355 if (status) { 3356 device_printf(dev, 3357 "%s: aq_get_switch_config() error %s, aq error %s\n", 3358 __func__, i40e_stat_str(hw, status), 3359 i40e_aq_str(hw, hw->aq.asq_last_status)); 3360 sbuf_delete(buf); 3361 return error; 3362 } 3363 if (next) 3364 device_printf(dev, "%s: TODO: get more config with SEID %d\n", 3365 __func__, next); 3366 3367 nmbuf = sbuf_new_auto(); 3368 if (!nmbuf) { 3369 device_printf(dev, "Could not allocate sbuf for name output.\n"); 3370 sbuf_delete(buf); 3371 return (ENOMEM); 3372 } 3373 3374 /* Sort entries by SEID for display */ 3375 qsort(sw_config->element, sw_config->header.num_reported, 3376 sizeof(struct i40e_aqc_switch_config_element_resp), 3377 &ixl_sw_cfg_elem_seid_cmp); 3378 3379 sbuf_cat(buf, "\n"); 3380 /* Assuming <= 255 elements in switch */ 3381 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); 3382 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); 3383 /* Exclude: 3384 * Revision -- all elements are revision 1 for now 3385 */ 3386 
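/* Each row below is "SEID (name) | uplink SEID (name) | downlink SEID (name) * | connection type"; names are resolved via ixl_switch_element_string(). */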
sbuf_printf(buf, 3387 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n" 3388 " | | | (uplink)\n"); 3389 for (int i = 0; i < sw_config->header.num_reported; i++) { 3390 elem = &sw_config->element[i]; 3391 3392 // "%4d (%8s) | %8s %8s %#8x", 3393 sbuf_printf(buf, "%4d", elem->seid); 3394 sbuf_cat(buf, " "); 3395 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3396 elem->element_type, elem->seid)); 3397 sbuf_cat(buf, " | "); 3398 sbuf_printf(buf, "%4d", elem->uplink_seid); 3399 sbuf_cat(buf, " "); 3400 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3401 0, elem->uplink_seid)); 3402 sbuf_cat(buf, " | "); 3403 sbuf_printf(buf, "%4d", elem->downlink_seid); 3404 sbuf_cat(buf, " "); 3405 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3406 0, elem->downlink_seid)); 3407 sbuf_cat(buf, " | "); 3408 sbuf_printf(buf, "%8d", elem->connection_type); 3409 if (i < sw_config->header.num_reported - 1) 3410 sbuf_cat(buf, "\n"); 3411 } 3412 sbuf_delete(nmbuf); 3413 3414 error = sbuf_finish(buf); 3415 if (error) 3416 device_printf(dev, "Error finishing sbuf: %d\n", error); 3417 3418 sbuf_delete(buf); 3419 3420 return (error); 3421 } 3422 3423 static int 3424 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) 3425 { 3426 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3427 struct i40e_hw *hw = &pf->hw; 3428 device_t dev = pf->dev; 3429 struct sbuf *buf; 3430 int error = 0; 3431 enum i40e_status_code status; 3432 u32 reg; 3433 3434 struct i40e_aqc_get_set_rss_key_data key_data; 3435 3436 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3437 if (!buf) { 3438 device_printf(dev, "Could not allocate sbuf for output.\n"); 3439 return (ENOMEM); 3440 } 3441 3442 bzero(&key_data, sizeof(key_data)); 3443 3444 sbuf_cat(buf, "\n"); 3445 if (hw->mac.type == I40E_MAC_X722) { 3446 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); 3447 if (status) 3448 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", 3449 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3450 } else { 3451 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 3452 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 3453 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); 3454 } 3455 } 3456 3457 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); 3458 3459 error = sbuf_finish(buf); 3460 if (error) 3461 device_printf(dev, "Error finishing sbuf: %d\n", error); 3462 sbuf_delete(buf); 3463 3464 return (error); 3465 } 3466 3467 static void 3468 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) 3469 { 3470 int i, j, k, width; 3471 char c; 3472 3473 if (length < 1 || buf == NULL) return; 3474 3475 int byte_stride = 16; 3476 int lines = length / byte_stride; 3477 int rem = length % byte_stride; 3478 if (rem > 0) 3479 lines++; 3480 3481 for (i = 0; i < lines; i++) { 3482 width = (rem > 0 && i == lines - 1) 3483 ? 
rem : byte_stride; 3484 3485 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); 3486 3487 for (j = 0; j < width; j++) 3488 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); 3489 3490 if (width < byte_stride) { 3491 for (k = 0; k < (byte_stride - width); k++) 3492 sbuf_printf(sb, " "); 3493 } 3494 3495 if (!text) { 3496 sbuf_printf(sb, "\n"); 3497 continue; 3498 } 3499 3500 for (j = 0; j < width; j++) { 3501 c = (char)buf[i * byte_stride + j]; 3502 if (c < 32 || c > 126) 3503 sbuf_printf(sb, "."); 3504 else 3505 sbuf_printf(sb, "%c", c); 3506 3507 if (j == width - 1) 3508 sbuf_printf(sb, "\n"); 3509 } 3510 } 3511 } 3512 3513 static int 3514 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) 3515 { 3516 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3517 struct i40e_hw *hw = &pf->hw; 3518 device_t dev = pf->dev; 3519 struct sbuf *buf; 3520 int error = 0; 3521 enum i40e_status_code status; 3522 u8 hlut[512]; 3523 u32 reg; 3524 3525 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3526 if (!buf) { 3527 device_printf(dev, "Could not allocate sbuf for output.\n"); 3528 return (ENOMEM); 3529 } 3530 3531 bzero(hlut, sizeof(hlut)); 3532 sbuf_cat(buf, "\n"); 3533 if (hw->mac.type == I40E_MAC_X722) { 3534 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); 3535 if (status) 3536 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", 3537 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3538 } else { 3539 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { 3540 reg = rd32(hw, I40E_PFQF_HLUT(i)); 3541 bcopy(®, &hlut[i << 2], 4); 3542 } 3543 } 3544 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); 3545 3546 error = sbuf_finish(buf); 3547 if (error) 3548 device_printf(dev, "Error finishing sbuf: %d\n", error); 3549 sbuf_delete(buf); 3550 3551 return (error); 3552 } 3553 3554 static int 3555 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) 3556 { 3557 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3558 struct i40e_hw *hw = &pf->hw; 3559 u64 hena; 3560 3561 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 3562 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 3563 3564 return sysctl_handle_long(oidp, NULL, hena, req); 3565 } 3566 3567 /* 3568 * Sysctl to disable firmware's link management 3569 * 3570 * 1 - Disable link management on this port 3571 * 0 - Re-enable link management 3572 * 3573 * On normal NVMs, firmware manages link by default. 
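 * Hypothetical usage on a first adapter: * sysctl dev.ixl.0.debug.disable_fw_link_management=1 * disables it for the port; writing 0 re-enables it.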
3574 */ 3575 static int 3576 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) 3577 { 3578 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3579 struct i40e_hw *hw = &pf->hw; 3580 device_t dev = pf->dev; 3581 int requested_mode = -1; 3582 enum i40e_status_code status = 0; 3583 int error = 0; 3584 3585 /* Read in new mode */ 3586 error = sysctl_handle_int(oidp, &requested_mode, 0, req); 3587 if ((error) || (req->newptr == NULL)) 3588 return (error); 3589 /* Check for sane value */ 3590 if (requested_mode < 0 || requested_mode > 1) { 3591 device_printf(dev, "Valid modes are 0 or 1\n"); 3592 return (EINVAL); 3593 } 3594 3595 /* Set new mode */ 3596 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); 3597 if (status) { 3598 device_printf(dev, 3599 "%s: Error setting new phy debug mode %s," 3600 " aq error: %s\n", __func__, i40e_stat_str(hw, status), 3601 i40e_aq_str(hw, hw->aq.asq_last_status)); 3602 return (EIO); 3603 } 3604 3605 return (0); 3606 } 3607 3608 /* 3609 * Read some diagnostic data from a (Q)SFP+ module 3610 * 3611 * SFP A2 QSFP Lower Page 3612 * Temperature 96-97 22-23 3613 * Vcc 98-99 26-27 3614 * TX power 102-103 34-35..40-41 3615 * RX power 104-105 50-51..56-57 3616 */ 3617 static int 3618 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 3619 { 3620 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3621 device_t dev = pf->dev; 3622 struct sbuf *sbuf; 3623 int error = 0; 3624 u8 output; 3625 3626 if (req->oldptr == NULL) { 3627 error = SYSCTL_OUT(req, 0, 128); 3628 return (0); 3629 } 3630 3631 error = pf->read_i2c_byte(pf, 0, 0xA0, &output); 3632 if (error) { 3633 device_printf(dev, "Error reading from i2c\n"); 3634 return (error); 3635 } 3636 3637 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 3638 if (output == 0x3) { 3639 /* 3640 * Check for: 3641 * - Internally calibrated data 3642 * - Diagnostic monitoring is implemented 3643 */ 3644 pf->read_i2c_byte(pf, 92, 0xA0, &output); 3645 if (!(output & 0x60)) { 3646 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); 3647 return (0); 3648 } 3649 3650 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3651 3652 for (u8 offset = 96; offset < 100; offset++) { 3653 pf->read_i2c_byte(pf, offset, 0xA2, &output); 3654 sbuf_printf(sbuf, "%02X ", output); 3655 } 3656 for (u8 offset = 102; offset < 106; offset++) { 3657 pf->read_i2c_byte(pf, offset, 0xA2, &output); 3658 sbuf_printf(sbuf, "%02X ", output); 3659 } 3660 } else if (output == 0xD || output == 0x11) { 3661 /* 3662 * QSFP+ modules are always internally calibrated, and must indicate 3663 * what types of diagnostic monitoring are implemented 3664 */ 3665 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3666 3667 for (u8 offset = 22; offset < 24; offset++) { 3668 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3669 sbuf_printf(sbuf, "%02X ", output); 3670 } 3671 for (u8 offset = 26; offset < 28; offset++) { 3672 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3673 sbuf_printf(sbuf, "%02X ", output); 3674 } 3675 /* Read the data from the first lane */ 3676 for (u8 offset = 34; offset < 36; offset++) { 3677 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3678 sbuf_printf(sbuf, "%02X ", output); 3679 } 3680 for (u8 offset = 50; offset < 52; offset++) { 3681 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3682 sbuf_printf(sbuf, "%02X ", output); 3683 } 3684 } else { 3685 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); 3686 return (0); 3687 } 3688 3689 sbuf_finish(sbuf); 3690 sbuf_delete(sbuf); 3691 3692 return (0); 3693 } 3694 3695 /* 3696 * 
/*
 * Sysctl to read a byte from I2C bus.
 *
 * Input: 32-bit value:
 *	bits 0-7:   device address (0xA0 or 0xA2)
 *	bits 8-15:  offset (0-255)
 *	bits 16-31: unused
 * Output: 8-bit value read
 */
static int
ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int input = -1, error = 0;
	u8 dev_addr, offset, output;

	/* Read in I2C read parameters */
	error = sysctl_handle_int(oidp, &input, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Validate device address */
	dev_addr = input & 0xFF;
	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
		return (EINVAL);
	}
	offset = (input >> 8) & 0xFF;

	error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
	if (error)
		return (error);

	device_printf(dev, "%02X\n", output);
	return (0);
}

/*
 * Sysctl to write a byte to the I2C bus.
 *
 * Input: 32-bit value:
 *	bits 0-7:   device address (0xA0 or 0xA2)
 *	bits 8-15:  offset (0-255)
 *	bits 16-23: value to write
 *	bits 24-31: unused
 * Output: 8-bit value written
 */
static int
ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int input = -1, error = 0;
	u8 dev_addr, offset, value;

	/* Read in I2C write parameters */
	error = sysctl_handle_int(oidp, &input, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	/* Validate device address */
	dev_addr = input & 0xFF;
	if (dev_addr != 0xA0 && dev_addr != 0xA2) {
		return (EINVAL);
	}
	offset = (input >> 8) & 0xFF;
	value = (input >> 16) & 0xFF;

	error = pf->write_i2c_byte(pf, offset, dev_addr, value);
	if (error)
		return (error);

	device_printf(dev, "%02X written\n", value);
	return (0);
}
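/*
 * Worked example of the input packing described above: to read the SFP
 * temperature MSB (offset 96 on device address 0xA2), write
 * ((96 << 8) | 0xA2) == 0x60A2 to the read sysctl; to write 0x55 to
 * offset 0x7B on 0xA0, write ((0x55 << 16) | (0x7B << 8) | 0xA0) ==
 * 0x557BA0 to the write sysctl. A sketch of building these values
 * (pack_i2c_* are hypothetical helpers, not part of the driver):
 *
 *	#include <stdint.h>
 *
 *	static inline uint32_t
 *	pack_i2c_read(uint8_t dev_addr, uint8_t offset)
 *	{
 *		return (((uint32_t)offset << 8) | dev_addr);
 *	}
 *
 *	static inline uint32_t
 *	pack_i2c_write(uint8_t dev_addr, uint8_t offset, uint8_t value)
 *	{
 *		return (((uint32_t)value << 16) |
 *		    ((uint32_t)offset << 8) | dev_addr);
 *	}
 */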
static int
ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int *is_set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return (EIO);

	status = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, abilities, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		return (EIO);
	}

	*is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
	return (0);
}

static int
ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
    u8 bit_pos, int set)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code status;

	/* Set new PHY config */
	memset(&config, 0, sizeof(config));
	config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
	if (set)
		config.fec_config |= bit_pos;
	if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		config.phy_type = abilities->phy_type;
		config.phy_type_ext = abilities->phy_type_ext;
		config.link_speed = abilities->link_speed;
		config.eee_capability = abilities->eee_capability;
		config.eeer = abilities->eeer_val;
		config.low_power_ctrl = abilities->d3_lpan;
		status = i40e_aq_set_phy_config(hw, &config, NULL);

		if (status) {
			device_printf(dev,
			    "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
			    __func__, i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			return (EIO);
		}
	}

	return (0);
}

static int
ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
}

static int
ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
}

static int
ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
}

static int
ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
}

static int
ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	int mode, error = 0;

	struct i40e_aq_get_phy_abilities_resp abilities;
	error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
	if (error)
		return (error);
	/* Read in new mode */
	error = sysctl_handle_int(oidp, &mode, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
}
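/*
 * All five FEC handlers above follow the same pattern: report the current
 * bit from the PHY abilities on read, and on write set or clear the
 * corresponding I40E_AQ_SET_FEC_* bit via ixl_set_fec_config(). A minimal
 * userland sketch of toggling one of them; the node name
 * "dev.ixl.0.fec.fc_ability" is an assumption for illustration.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	set_fec_fc_ability(int enable)
 *	{
 *		// Writing a non-zero int requests the ability be enabled.
 *		return (sysctlbyname("dev.ixl.0.fec.fc_ability", NULL, NULL,
 *		    &enable, sizeof(enable)));
 *	}
 */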
static int
ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;
	enum i40e_status_code status;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	u8 *final_buff;
	/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
	final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT);
	if (final_buff == NULL) {
		device_printf(dev, "Could not allocate memory for output.\n");
		goto out;
	}
	int final_buff_len = 0;

	u8 cluster_id = 1;
	bool more = true;

	u8 dump_buf[4096];
	u16 curr_buff_size = 4096;
	u8 curr_next_table = 0;
	u32 curr_next_index = 0;

	u16 ret_buff_size;
	u8 ret_next_table;
	u32 ret_next_index;

	sbuf_cat(buf, "\n");

	while (more) {
		status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
		    dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
		if (status) {
			device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			goto free_out;
		}

		/* copy info out of temp buffer, guarding against overflow */
		if (final_buff_len + ret_buff_size > IXL_FINAL_BUFF_SIZE) {
			device_printf(dev, "Table exceeds %d byte buffer\n",
			    IXL_FINAL_BUFF_SIZE);
			goto free_out;
		}
		bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
		final_buff_len += ret_buff_size;

		if (ret_next_table != curr_next_table) {
			/* We're done with the current table; we can dump out read data. */
			sbuf_printf(buf, "%d:", curr_next_table);
			int bytes_printed = 0;
			while (bytes_printed < final_buff_len) {
				sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
				bytes_printed += 16;
			}
			sbuf_cat(buf, "\n");

			/* The entire cluster has been read; we're finished */
			if (ret_next_table == 0xFF)
				break;

			/* Otherwise clear the output buffer and continue reading */
			bzero(final_buff, IXL_FINAL_BUFF_SIZE);
			final_buff_len = 0;
		}

		if (ret_next_index == 0xFFFFFFFF)
			ret_next_index = 0;

		bzero(dump_buf, sizeof(dump_buf));
		curr_next_table = ret_next_table;
		curr_next_index = ret_next_index;
	}

free_out:
	free(final_buff, M_DEVBUF);
out:
	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
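/*
 * The sbuf-backed handlers in this file produce variable-length text. A
 * sketch of the standard two-call pattern for reading such a node from
 * userland; the node name is an assumption for illustration.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		const char *node = "dev.ixl.0.debug.dump_debug_data";
 *		size_t len = 0;
 *		char *out;
 *
 *		// First call with NULL oldp just reports the needed size.
 *		if (sysctlbyname(node, NULL, &len, NULL, 0) == -1)
 *			return (1);
 *		if ((out = malloc(len)) == NULL)
 *			return (1);
 *		if (sysctlbyname(node, out, &len, NULL, 0) == -1)
 *			return (1);
 *		fwrite(out, 1, len, stdout);
 *		free(out);
 *		return (0);
 *	}
 */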
Set the \"LLDP Agent\" UEFI HII " 4028 "attribute to \"Enabled\" to use this sysctl\n"); 4029 return (EINVAL); 4030 default: 4031 device_printf(pf->dev, 4032 "Starting FW LLDP agent failed: error: %s, %s\n", 4033 i40e_stat_str(hw, status), 4034 i40e_aq_str(hw, hw->aq.asq_last_status)); 4035 return (EINVAL); 4036 } 4037 } 4038 4039 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4040 return (0); 4041 } 4042 4043 static int 4044 ixl_stop_fw_lldp(struct ixl_pf *pf) 4045 { 4046 struct i40e_hw *hw = &pf->hw; 4047 device_t dev = pf->dev; 4048 enum i40e_status_code status; 4049 4050 if (hw->func_caps.npar_enable != 0) { 4051 device_printf(dev, 4052 "Disabling FW LLDP agent is not supported on this device\n"); 4053 return (EINVAL); 4054 } 4055 4056 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 4057 device_printf(dev, 4058 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); 4059 return (EINVAL); 4060 } 4061 4062 status = i40e_aq_stop_lldp(hw, true, false, NULL); 4063 if (status != I40E_SUCCESS) { 4064 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 4065 device_printf(dev, 4066 "Disabling FW LLDP agent failed: error: %s, %s\n", 4067 i40e_stat_str(hw, status), 4068 i40e_aq_str(hw, hw->aq.asq_last_status)); 4069 return (EINVAL); 4070 } 4071 4072 device_printf(dev, "FW LLDP agent is already stopped\n"); 4073 } 4074 4075 #ifndef EXTERNAL_RELEASE 4076 /* Let the FW set default DCB configuration on link UP as described in DCR 307.1 */ 4077 #endif 4078 i40e_aq_set_dcb_parameters(hw, true, NULL); 4079 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4080 return (0); 4081 } 4082 4083 static int 4084 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 4085 { 4086 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4087 int state, new_state, error = 0; 4088 4089 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); 4090 4091 /* Read in new mode */ 4092 error = sysctl_handle_int(oidp, &new_state, 0, req); 4093 if ((error) || (req->newptr == NULL)) 4094 return (error); 4095 4096 /* Already in requested state */ 4097 if (new_state == state) 4098 return (error); 4099 4100 if (new_state == 0) 4101 return ixl_stop_fw_lldp(pf); 4102 4103 return ixl_start_fw_lldp(pf); 4104 } 4105 4106 int 4107 ixl_attach_get_link_status(struct ixl_pf *pf) 4108 { 4109 struct i40e_hw *hw = &pf->hw; 4110 device_t dev = pf->dev; 4111 int error = 0; 4112 4113 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 4114 (hw->aq.fw_maj_ver < 4)) { 4115 i40e_msec_delay(75); 4116 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); 4117 if (error) { 4118 device_printf(dev, "link restart failed, aq_err=%d\n", 4119 pf->hw.aq.asq_last_status); 4120 return error; 4121 } 4122 } 4123 4124 /* Determine link state */ 4125 hw->phy.get_link_info = TRUE; 4126 i40e_get_link_status(hw, &pf->link_up); 4127 return (0); 4128 } 4129 4130 static int 4131 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS) 4132 { 4133 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4134 int requested = 0, error = 0; 4135 4136 /* Read in new mode */ 4137 error = sysctl_handle_int(oidp, &requested, 0, req); 4138 if ((error) || (req->newptr == NULL)) 4139 return (error); 4140 4141 /* Initiate the PF reset later in the admin task */ 4142 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ); 4143 4144 return (error); 4145 } 4146 4147 static int 4148 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS) 4149 { 4150 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4151 struct i40e_hw *hw = &pf->hw; 4152 int requested = 
static int
ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int requested = 0, error = 0;

	/* Read in new mode */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);

	return (error);
}

static int
ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	int requested = 0, error = 0;

	/* Read in new mode */
	error = sysctl_handle_int(oidp, &requested, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);

	return (error);
}

/*
 * Print out mapping of TX queue indexes and RX queue indexes
 * to MSI-X vectors.
 */
static int
ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		rx_que = &vsi->rx_queues[i];
		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
	}
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		tx_que = &vsi->tx_queues[i];
		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}