1 /****************************************************************************** 2 3 Copyright (c) 2013-2018, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 36 #include "ixl_pf.h" 37 38 #ifdef PCI_IOV 39 #include "ixl_pf_iov.h" 40 #endif 41 42 #ifdef IXL_IW 43 #include "ixl_iw.h" 44 #include "ixl_iw_int.h" 45 #endif 46 47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool); 48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool); 49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed); 50 static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int); 51 static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int); 52 static char * ixl_switch_element_string(struct sbuf *, u8, u16); 53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *); 54 55 /* Sysctls */ 56 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS); 57 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS); 58 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS); 59 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); 60 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); 61 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); 62 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); 63 64 static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS); 65 66 /* Debug Sysctls */ 67 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); 68 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); 69 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); 70 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); 71 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); 72 static int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS); 73 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); 74 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); 75 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); 76 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); 77 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); 78 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); 79 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); 80 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); 81 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); 82 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); 83 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); 84 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS); 85 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS); 86 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); 87 88 /* Debug Sysctls */ 89 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS); 90 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS); 91 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS); 92 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS); 93 #ifdef IXL_DEBUG 94 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); 95 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); 96 #endif 97 98 #ifdef IXL_IW 99 extern int ixl_enable_iwarp; 100 extern int ixl_limit_iwarp_msix; 101 #endif 102 103 static const char * const ixl_fc_string[6] = { 104 "None", 105 "Rx", 106 "Tx", 107 "Full", 108 "Priority", 109 "Default" 110 }; 111 112 static char *ixl_fec_string[3] = { 113 "CL108 RS-FEC", 114 "CL74 FC-FEC/BASE-R", 115 "None" 116 }; 117 118 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); 119 120 /* 121 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string 122 */ 123 void 124 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf) 125 { 126 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24); 127 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF); 128 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF); 129 130 sbuf_printf(buf, 131 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d", 132 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, 133 hw->aq.api_maj_ver, hw->aq.api_min_ver, 134 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> 135 IXL_NVM_VERSION_HI_SHIFT, 136 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> 137 IXL_NVM_VERSION_LO_SHIFT, 138 hw->nvm.eetrack, 139 oem_ver, oem_build, oem_patch); 140 } 141 142 void 143 ixl_print_nvm_version(struct ixl_pf *pf) 144 { 145 struct i40e_hw *hw = &pf->hw; 146 device_t dev = pf->dev; 147 struct sbuf *sbuf; 148 149 sbuf = sbuf_new_auto(); 150 ixl_nvm_version_str(hw, sbuf); 151 sbuf_finish(sbuf); 152 device_printf(dev, "%s\n", sbuf_data(sbuf)); 153 sbuf_delete(sbuf); 154 } 155 156 /** 157 * ixl_get_fw_mode - Check the state of FW 158 * @hw: device hardware structure 159 * 160 * Identify state of FW. It might be in a recovery mode 161 * which limits functionality and requires special handling 162 * from the driver. 163 * 164 * @returns FW mode (normal, recovery, unexpected EMP reset) 165 */ 166 static enum ixl_fw_mode 167 ixl_get_fw_mode(struct ixl_pf *pf) 168 { 169 struct i40e_hw *hw = &pf->hw; 170 enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL; 171 u32 fwsts; 172 173 #ifdef IXL_DEBUG 174 if (pf->recovery_mode) 175 return IXL_FW_MODE_RECOVERY; 176 #endif 177 fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; 178 179 /* Is set and has one of expected values */ 180 if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK && 181 fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) || 182 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || 183 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK) 184 fw_mode = IXL_FW_MODE_RECOVERY; 185 else { 186 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 && 187 fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10) 188 fw_mode = IXL_FW_MODE_UEMPR; 189 } 190 return (fw_mode); 191 } 192 193 /** 194 * ixl_pf_reset - Reset the PF 195 * @pf: PF structure 196 * 197 * Ensure that FW is in the right state and do the reset 198 * if needed. 199 * 200 * @returns zero on success, or an error code on failure. 201 */ 202 int 203 ixl_pf_reset(struct ixl_pf *pf) 204 { 205 struct i40e_hw *hw = &pf->hw; 206 enum i40e_status_code status; 207 enum ixl_fw_mode fw_mode; 208 209 fw_mode = ixl_get_fw_mode(pf); 210 ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode); 211 if (fw_mode == IXL_FW_MODE_RECOVERY) { 212 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); 213 /* Don't try to reset device if it's in recovery mode */ 214 return (0); 215 } 216 217 status = i40e_pf_reset(hw); 218 if (status == I40E_SUCCESS) 219 return (0); 220 221 /* Check FW mode again in case it has changed while 222 * waiting for reset to complete */ 223 fw_mode = ixl_get_fw_mode(pf); 224 ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode); 225 if (fw_mode == IXL_FW_MODE_RECOVERY) { 226 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); 227 return (0); 228 } 229 230 if (fw_mode == IXL_FW_MODE_UEMPR) 231 device_printf(pf->dev, 232 "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); 233 else 234 device_printf(pf->dev, "PF reset failure %s\n", 235 i40e_stat_str(hw, status)); 236 return (EIO); 237 } 238 239 /** 240 * ixl_setup_hmc - Setup LAN Host Memory Cache 241 * @pf: PF structure 242 * 243 * Init and configure LAN Host Memory Cache 244 * 245 * @returns 0 on success, EIO on error 246 */ 247 int 248 ixl_setup_hmc(struct ixl_pf *pf) 249 { 250 struct i40e_hw *hw = &pf->hw; 251 enum i40e_status_code status; 252 253 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 254 hw->func_caps.num_rx_qp, 0, 0); 255 if (status) { 256 device_printf(pf->dev, "init_lan_hmc failed: %s\n", 257 i40e_stat_str(hw, status)); 258 return (EIO); 259 } 260 261 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 262 if (status) { 263 device_printf(pf->dev, "configure_lan_hmc failed: %s\n", 264 i40e_stat_str(hw, status)); 265 return (EIO); 266 } 267 268 return (0); 269 } 270 271 /** 272 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache 273 * @pf: PF structure 274 * 275 * Shutdown Host Memory Cache if configured. 276 * 277 */ 278 void 279 ixl_shutdown_hmc(struct ixl_pf *pf) 280 { 281 struct i40e_hw *hw = &pf->hw; 282 enum i40e_status_code status; 283 284 /* HMC not configured, no need to shutdown */ 285 if (hw->hmc.hmc_obj == NULL) 286 return; 287 288 status = i40e_shutdown_lan_hmc(hw); 289 if (status) 290 device_printf(pf->dev, 291 "Shutdown LAN HMC failed with code %s\n", 292 i40e_stat_str(hw, status)); 293 } 294 /* 295 * Write PF ITR values to queue ITR registers. 296 */ 297 void 298 ixl_configure_itr(struct ixl_pf *pf) 299 { 300 ixl_configure_tx_itr(pf); 301 ixl_configure_rx_itr(pf); 302 } 303 304 /********************************************************************* 305 * 306 * Get the hardware capabilities 307 * 308 **********************************************************************/ 309 310 int 311 ixl_get_hw_capabilities(struct ixl_pf *pf) 312 { 313 struct i40e_aqc_list_capabilities_element_resp *buf; 314 struct i40e_hw *hw = &pf->hw; 315 device_t dev = pf->dev; 316 enum i40e_status_code status; 317 int len, i2c_intfc_num; 318 bool again = TRUE; 319 u16 needed; 320 321 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 322 hw->func_caps.iwarp = 0; 323 return (0); 324 } 325 326 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); 327 retry: 328 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) 329 malloc(len, M_IXL, M_NOWAIT | M_ZERO))) { 330 device_printf(dev, "Unable to allocate cap memory\n"); 331 return (ENOMEM); 332 } 333 334 /* This populates the hw struct */ 335 status = i40e_aq_discover_capabilities(hw, buf, len, 336 &needed, i40e_aqc_opc_list_func_capabilities, NULL); 337 free(buf, M_IXL); 338 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && 339 (again == TRUE)) { 340 /* retry once with a larger buffer */ 341 again = FALSE; 342 len = needed; 343 goto retry; 344 } else if (status != I40E_SUCCESS) { 345 device_printf(dev, "capability discovery failed; status %s, error %s\n", 346 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 347 return (ENODEV); 348 } 349 350 /* 351 * Some devices have both MDIO and I2C; since this isn't reported 352 * by the FW, check registers to see if an I2C interface exists. 353 */ 354 i2c_intfc_num = ixl_find_i2c_interface(pf); 355 if (i2c_intfc_num != -1) 356 pf->has_i2c = true; 357 358 /* Determine functions to use for driver I2C accesses */ 359 switch (pf->i2c_access_method) { 360 case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: { 361 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 362 pf->read_i2c_byte = ixl_read_i2c_byte_aq; 363 pf->write_i2c_byte = ixl_write_i2c_byte_aq; 364 } else { 365 pf->read_i2c_byte = ixl_read_i2c_byte_reg; 366 pf->write_i2c_byte = ixl_write_i2c_byte_reg; 367 } 368 break; 369 } 370 case IXL_I2C_ACCESS_METHOD_AQ: 371 pf->read_i2c_byte = ixl_read_i2c_byte_aq; 372 pf->write_i2c_byte = ixl_write_i2c_byte_aq; 373 break; 374 case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD: 375 pf->read_i2c_byte = ixl_read_i2c_byte_reg; 376 pf->write_i2c_byte = ixl_write_i2c_byte_reg; 377 break; 378 case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS: 379 pf->read_i2c_byte = ixl_read_i2c_byte_bb; 380 pf->write_i2c_byte = ixl_write_i2c_byte_bb; 381 break; 382 default: 383 /* Should not happen */ 384 device_printf(dev, "Error setting I2C access functions\n"); 385 break; 386 } 387 388 /* Print a subset of the capability information. */ 389 device_printf(dev, 390 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n", 391 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, 392 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, 393 (hw->func_caps.mdio_port_mode == 2) ? "I2C" : 394 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" : 395 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" : 396 "MDIO shared"); 397 398 return (0); 399 } 400 401 /* For the set_advertise sysctl */ 402 void 403 ixl_set_initial_advertised_speeds(struct ixl_pf *pf) 404 { 405 device_t dev = pf->dev; 406 int err; 407 408 /* Make sure to initialize the device to the complete list of 409 * supported speeds on driver load, to ensure unloading and 410 * reloading the driver will restore this value. 411 */ 412 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); 413 if (err) { 414 /* Non-fatal error */ 415 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", 416 __func__, err); 417 return; 418 } 419 420 pf->advertised_speed = 421 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 422 } 423 424 int 425 ixl_teardown_hw_structs(struct ixl_pf *pf) 426 { 427 enum i40e_status_code status = 0; 428 struct i40e_hw *hw = &pf->hw; 429 device_t dev = pf->dev; 430 431 /* Shutdown LAN HMC */ 432 if (hw->hmc.hmc_obj) { 433 status = i40e_shutdown_lan_hmc(hw); 434 if (status) { 435 device_printf(dev, 436 "init: LAN HMC shutdown failure; status %s\n", 437 i40e_stat_str(hw, status)); 438 goto err_out; 439 } 440 } 441 442 /* Shutdown admin queue */ 443 ixl_disable_intr0(hw); 444 status = i40e_shutdown_adminq(hw); 445 if (status) 446 device_printf(dev, 447 "init: Admin Queue shutdown failure; status %s\n", 448 i40e_stat_str(hw, status)); 449 450 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag); 451 err_out: 452 return (status); 453 } 454 455 /* 456 ** Creates new filter with given MAC address and VLAN ID 457 */ 458 static struct ixl_mac_filter * 459 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan) 460 { 461 struct ixl_mac_filter *f; 462 463 /* create a new empty filter */ 464 f = malloc(sizeof(struct ixl_mac_filter), 465 M_IXL, M_NOWAIT | M_ZERO); 466 if (f) { 467 LIST_INSERT_HEAD(headp, f, ftle); 468 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); 469 f->vlan = vlan; 470 } 471 472 return (f); 473 } 474 475 /** 476 * ixl_free_filters - Free all filters in given list 477 * headp - pointer to list head 478 * 479 * Frees memory used by each entry in the list. 480 * Does not remove filters from HW. 481 */ 482 void 483 ixl_free_filters(struct ixl_ftl_head *headp) 484 { 485 struct ixl_mac_filter *f, *nf; 486 487 f = LIST_FIRST(headp); 488 while (f != NULL) { 489 nf = LIST_NEXT(f, ftle); 490 free(f, M_IXL); 491 f = nf; 492 } 493 494 LIST_INIT(headp); 495 } 496 497 static u_int 498 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 499 { 500 struct ixl_add_maddr_arg *ama = arg; 501 struct ixl_vsi *vsi = ama->vsi; 502 const u8 *macaddr = (u8*)LLADDR(sdl); 503 struct ixl_mac_filter *f; 504 505 /* Does one already exist */ 506 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY); 507 if (f != NULL) 508 return (0); 509 510 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY); 511 if (f == NULL) { 512 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 513 return (0); 514 } 515 f->flags |= IXL_FILTER_MC; 516 517 return (1); 518 } 519 520 /********************************************************************* 521 * Filter Routines 522 * 523 * Routines for multicast and vlan filter management. 524 * 525 *********************************************************************/ 526 void 527 ixl_add_multi(struct ixl_vsi *vsi) 528 { 529 struct ifnet *ifp = vsi->ifp; 530 struct i40e_hw *hw = vsi->hw; 531 int mcnt = 0; 532 struct ixl_add_maddr_arg cb_arg; 533 534 IOCTL_DEBUGOUT("ixl_add_multi: begin"); 535 536 mcnt = if_llmaddr_count(ifp); 537 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { 538 i40e_aq_set_vsi_multicast_promiscuous(hw, 539 vsi->seid, TRUE, NULL); 540 /* delete all existing MC filters */ 541 ixl_del_multi(vsi, true); 542 return; 543 } 544 545 cb_arg.vsi = vsi; 546 LIST_INIT(&cb_arg.to_add); 547 548 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg); 549 if (mcnt > 0) 550 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt); 551 552 IOCTL_DEBUGOUT("ixl_add_multi: end"); 553 } 554 555 static u_int 556 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 557 { 558 struct ixl_mac_filter *f = arg; 559 560 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl))) 561 return (1); 562 else 563 return (0); 564 } 565 566 void 567 ixl_del_multi(struct ixl_vsi *vsi, bool all) 568 { 569 struct ixl_ftl_head to_del; 570 struct ifnet *ifp = vsi->ifp; 571 struct ixl_mac_filter *f, *fn; 572 int mcnt = 0; 573 574 IOCTL_DEBUGOUT("ixl_del_multi: begin"); 575 576 LIST_INIT(&to_del); 577 /* Search for removed multicast addresses */ 578 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) { 579 if ((f->flags & IXL_FILTER_MC) == 0 || 580 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0))) 581 continue; 582 583 LIST_REMOVE(f, ftle); 584 LIST_INSERT_HEAD(&to_del, f, ftle); 585 mcnt++; 586 } 587 588 if (mcnt > 0) 589 ixl_del_hw_filters(vsi, &to_del, mcnt); 590 } 591 592 void 593 ixl_link_up_msg(struct ixl_pf *pf) 594 { 595 struct i40e_hw *hw = &pf->hw; 596 struct ifnet *ifp = pf->vsi.ifp; 597 char *req_fec_string, *neg_fec_string; 598 u8 fec_abilities; 599 600 fec_abilities = hw->phy.link_info.req_fec_info; 601 /* If both RS and KR are requested, only show RS */ 602 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) 603 req_fec_string = ixl_fec_string[0]; 604 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) 605 req_fec_string = ixl_fec_string[1]; 606 else 607 req_fec_string = ixl_fec_string[2]; 608 609 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) 610 neg_fec_string = ixl_fec_string[0]; 611 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) 612 neg_fec_string = ixl_fec_string[1]; 613 else 614 neg_fec_string = ixl_fec_string[2]; 615 616 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 617 ifp->if_xname, 618 ixl_link_speed_string(hw->phy.link_info.link_speed), 619 req_fec_string, neg_fec_string, 620 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", 621 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && 622 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 623 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? 624 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 625 ixl_fc_string[1] : ixl_fc_string[0]); 626 } 627 628 /* 629 * Configure admin queue/misc interrupt cause registers in hardware. 630 */ 631 void 632 ixl_configure_intr0_msix(struct ixl_pf *pf) 633 { 634 struct i40e_hw *hw = &pf->hw; 635 u32 reg; 636 637 /* First set up the adminq - vector 0 */ 638 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ 639 rd32(hw, I40E_PFINT_ICR0); /* read to clear */ 640 641 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | 642 I40E_PFINT_ICR0_ENA_GRST_MASK | 643 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 644 I40E_PFINT_ICR0_ENA_ADMINQ_MASK | 645 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | 646 I40E_PFINT_ICR0_ENA_VFLR_MASK | 647 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | 648 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; 649 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 650 651 /* 652 * 0x7FF is the end of the queue list. 653 * This means we won't use MSI-X vector 0 for a queue interrupt 654 * in MSI-X mode. 655 */ 656 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); 657 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */ 658 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E); 659 660 wr32(hw, I40E_PFINT_DYN_CTL0, 661 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | 662 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); 663 664 wr32(hw, I40E_PFINT_STAT_CTL0, 0); 665 } 666 667 void 668 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types) 669 { 670 /* Display supported media types */ 671 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX)) 672 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL); 673 674 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T)) 675 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL); 676 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX)) 677 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL); 678 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX)) 679 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL); 680 681 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T)) 682 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL); 683 684 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T)) 685 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL); 686 687 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) || 688 phy_types & (I40E_CAP_PHY_TYPE_XFI) || 689 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU)) 690 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); 691 692 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR)) 693 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL); 694 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR)) 695 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL); 696 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T)) 697 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL); 698 699 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) || 700 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || 701 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) || 702 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) || 703 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) 704 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL); 705 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4)) 706 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL); 707 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4)) 708 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL); 709 710 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX)) 711 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL); 712 713 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) 714 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1)) 715 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL); 716 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC)) 717 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL); 718 if (phy_types & (I40E_CAP_PHY_TYPE_SFI)) 719 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL); 720 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4)) 721 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL); 722 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR)) 723 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL); 724 725 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2)) 726 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL); 727 728 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) 729 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL); 730 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI)) 731 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); 732 733 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR)) 734 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL); 735 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR)) 736 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL); 737 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR)) 738 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL); 739 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR)) 740 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL); 741 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC)) 742 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL); 743 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC)) 744 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL); 745 } 746 747 /********************************************************************* 748 * 749 * Get Firmware Switch configuration 750 * - this will need to be more robust when more complex 751 * switch configurations are enabled. 752 * 753 **********************************************************************/ 754 int 755 ixl_switch_config(struct ixl_pf *pf) 756 { 757 struct i40e_hw *hw = &pf->hw; 758 struct ixl_vsi *vsi = &pf->vsi; 759 device_t dev = iflib_get_dev(vsi->ctx); 760 struct i40e_aqc_get_switch_config_resp *sw_config; 761 u8 aq_buf[I40E_AQ_LARGE_BUF]; 762 int ret; 763 u16 next = 0; 764 765 memset(&aq_buf, 0, sizeof(aq_buf)); 766 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 767 ret = i40e_aq_get_switch_config(hw, sw_config, 768 sizeof(aq_buf), &next, NULL); 769 if (ret) { 770 device_printf(dev, "aq_get_switch_config() failed, error %d," 771 " aq_error %d\n", ret, pf->hw.aq.asq_last_status); 772 return (ret); 773 } 774 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) { 775 device_printf(dev, 776 "Switch config: header reported: %d in structure, %d total\n", 777 LE16_TO_CPU(sw_config->header.num_reported), 778 LE16_TO_CPU(sw_config->header.num_total)); 779 for (int i = 0; 780 i < LE16_TO_CPU(sw_config->header.num_reported); i++) { 781 device_printf(dev, 782 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i, 783 sw_config->element[i].element_type, 784 LE16_TO_CPU(sw_config->element[i].seid), 785 LE16_TO_CPU(sw_config->element[i].uplink_seid), 786 LE16_TO_CPU(sw_config->element[i].downlink_seid)); 787 } 788 } 789 /* Simplified due to a single VSI */ 790 vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid); 791 vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid); 792 vsi->seid = LE16_TO_CPU(sw_config->element[0].seid); 793 return (ret); 794 } 795 796 void 797 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls) 798 { 799 struct sysctl_oid *tree; 800 struct sysctl_oid_list *child; 801 struct sysctl_oid_list *vsi_list; 802 803 tree = device_get_sysctl_tree(vsi->dev); 804 child = SYSCTL_CHILDREN(tree); 805 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name, 806 CTLFLAG_RD, NULL, "VSI Number"); 807 808 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 809 ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats); 810 811 if (queues_sysctls) 812 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx); 813 } 814 815 /* 816 * Used to set the Tx ITR value for all of the PF LAN VSI's queues. 817 * Writes to the ITR registers immediately. 818 */ 819 static int 820 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS) 821 { 822 struct ixl_pf *pf = (struct ixl_pf *)arg1; 823 device_t dev = pf->dev; 824 int error = 0; 825 int requested_tx_itr; 826 827 requested_tx_itr = pf->tx_itr; 828 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); 829 if ((error) || (req->newptr == NULL)) 830 return (error); 831 if (pf->dynamic_tx_itr) { 832 device_printf(dev, 833 "Cannot set TX itr value while dynamic TX itr is enabled\n"); 834 return (EINVAL); 835 } 836 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { 837 device_printf(dev, 838 "Invalid TX itr value; value must be between 0 and %d\n", 839 IXL_MAX_ITR); 840 return (EINVAL); 841 } 842 843 pf->tx_itr = requested_tx_itr; 844 ixl_configure_tx_itr(pf); 845 846 return (error); 847 } 848 849 /* 850 * Used to set the Rx ITR value for all of the PF LAN VSI's queues. 851 * Writes to the ITR registers immediately. 852 */ 853 static int 854 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS) 855 { 856 struct ixl_pf *pf = (struct ixl_pf *)arg1; 857 device_t dev = pf->dev; 858 int error = 0; 859 int requested_rx_itr; 860 861 requested_rx_itr = pf->rx_itr; 862 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); 863 if ((error) || (req->newptr == NULL)) 864 return (error); 865 if (pf->dynamic_rx_itr) { 866 device_printf(dev, 867 "Cannot set RX itr value while dynamic RX itr is enabled\n"); 868 return (EINVAL); 869 } 870 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { 871 device_printf(dev, 872 "Invalid RX itr value; value must be between 0 and %d\n", 873 IXL_MAX_ITR); 874 return (EINVAL); 875 } 876 877 pf->rx_itr = requested_rx_itr; 878 ixl_configure_rx_itr(pf); 879 880 return (error); 881 } 882 883 void 884 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, 885 struct sysctl_oid_list *child, 886 struct i40e_hw_port_stats *stats) 887 { 888 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, 889 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics"); 890 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); 891 892 struct i40e_eth_stats *eth_stats = &stats->eth; 893 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); 894 895 struct ixl_sysctl_info ctls[] = 896 { 897 {&stats->crc_errors, "crc_errors", "CRC Errors"}, 898 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, 899 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, 900 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, 901 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"}, 902 /* Packet Reception Stats */ 903 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, 904 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, 905 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, 906 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, 907 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, 908 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, 909 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, 910 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, 911 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, 912 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, 913 {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, 914 {&stats->checksum_error, "checksum_errors", "Checksum Errors"}, 915 /* Packet Transmission Stats */ 916 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, 917 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, 918 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, 919 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, 920 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, 921 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, 922 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, 923 /* Flow control */ 924 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, 925 {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, 926 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, 927 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, 928 /* End */ 929 {0,0,0} 930 }; 931 932 struct ixl_sysctl_info *entry = ctls; 933 while (entry->stat != 0) 934 { 935 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, 936 CTLFLAG_RD, entry->stat, 937 entry->description); 938 entry++; 939 } 940 } 941 942 void 943 ixl_set_rss_key(struct ixl_pf *pf) 944 { 945 struct i40e_hw *hw = &pf->hw; 946 struct ixl_vsi *vsi = &pf->vsi; 947 device_t dev = pf->dev; 948 u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; 949 enum i40e_status_code status; 950 951 #ifdef RSS 952 /* Fetch the configured RSS key */ 953 rss_getkey((uint8_t *) &rss_seed); 954 #else 955 ixl_get_default_rss_key(rss_seed); 956 #endif 957 /* Fill out hash function seed */ 958 if (hw->mac.type == I40E_MAC_X722) { 959 struct i40e_aqc_get_set_rss_key_data key_data; 960 bcopy(rss_seed, &key_data, 52); 961 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data); 962 if (status) 963 device_printf(dev, 964 "i40e_aq_set_rss_key status %s, error %s\n", 965 i40e_stat_str(hw, status), 966 i40e_aq_str(hw, hw->aq.asq_last_status)); 967 } else { 968 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) 969 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]); 970 } 971 } 972 973 /* 974 * Configure enabled PCTYPES for RSS. 975 */ 976 void 977 ixl_set_rss_pctypes(struct ixl_pf *pf) 978 { 979 struct i40e_hw *hw = &pf->hw; 980 u64 set_hena = 0, hena; 981 982 #ifdef RSS 983 u32 rss_hash_config; 984 985 rss_hash_config = rss_gethashconfig(); 986 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 987 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); 988 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 989 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); 990 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 991 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); 992 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 993 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); 994 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 995 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); 996 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 997 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); 998 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 999 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); 1000 #else 1001 if (hw->mac.type == I40E_MAC_X722) 1002 set_hena = IXL_DEFAULT_RSS_HENA_X722; 1003 else 1004 set_hena = IXL_DEFAULT_RSS_HENA_XL710; 1005 #endif 1006 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 1007 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 1008 hena |= set_hena; 1009 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); 1010 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 1011 1012 } 1013 1014 /* 1015 ** Setup the PF's RSS parameters. 1016 */ 1017 void 1018 ixl_config_rss(struct ixl_pf *pf) 1019 { 1020 ixl_set_rss_key(pf); 1021 ixl_set_rss_pctypes(pf); 1022 ixl_set_rss_hlut(pf); 1023 } 1024 1025 /* 1026 * In some firmware versions there is default MAC/VLAN filter 1027 * configured which interferes with filters managed by driver. 1028 * Make sure it's removed. 1029 */ 1030 void 1031 ixl_del_default_hw_filters(struct ixl_vsi *vsi) 1032 { 1033 struct i40e_aqc_remove_macvlan_element_data e; 1034 1035 bzero(&e, sizeof(e)); 1036 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); 1037 e.vlan_tag = 0; 1038 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 1039 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); 1040 1041 bzero(&e, sizeof(e)); 1042 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); 1043 e.vlan_tag = 0; 1044 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | 1045 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 1046 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); 1047 } 1048 1049 /* 1050 ** Initialize filter list and add filters that the hardware 1051 ** needs to know about. 1052 ** 1053 ** Requires VSI's seid to be set before calling. 1054 */ 1055 void 1056 ixl_init_filters(struct ixl_vsi *vsi) 1057 { 1058 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 1059 1060 ixl_dbg_filter(pf, "%s: start\n", __func__); 1061 1062 /* Initialize mac filter list for VSI */ 1063 LIST_INIT(&vsi->ftl); 1064 vsi->num_hw_filters = 0; 1065 1066 /* Receive broadcast Ethernet frames */ 1067 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL); 1068 1069 if (IXL_VSI_IS_VF(vsi)) 1070 return; 1071 1072 ixl_del_default_hw_filters(vsi); 1073 1074 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY); 1075 1076 /* 1077 * Prevent Tx flow control frames from being sent out by 1078 * non-firmware transmitters. 1079 * This affects every VSI in the PF. 1080 */ 1081 #ifndef IXL_DEBUG_FC 1082 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); 1083 #else 1084 if (pf->enable_tx_fc_filter) 1085 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); 1086 #endif 1087 } 1088 1089 void 1090 ixl_reconfigure_filters(struct ixl_vsi *vsi) 1091 { 1092 struct i40e_hw *hw = vsi->hw; 1093 struct ixl_ftl_head tmp; 1094 int cnt; 1095 1096 /* 1097 * The ixl_add_hw_filters function adds filters configured 1098 * in HW to a list in VSI. Move all filters to a temporary 1099 * list to avoid corrupting it by concatenating to itself. 1100 */ 1101 LIST_INIT(&tmp); 1102 LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle); 1103 cnt = vsi->num_hw_filters; 1104 vsi->num_hw_filters = 0; 1105 1106 ixl_add_hw_filters(vsi, &tmp, cnt); 1107 1108 /* Filter could be removed if MAC address was changed */ 1109 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); 1110 1111 if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0) 1112 return; 1113 /* 1114 * VLAN HW filtering is enabled, make sure that filters 1115 * for all registered VLAN tags are configured 1116 */ 1117 ixl_add_vlan_filters(vsi, hw->mac.addr); 1118 } 1119 1120 /* 1121 * This routine adds a MAC/VLAN filter to the software filter 1122 * list, then adds that new filter to the HW if it doesn't already 1123 * exist in the SW filter list. 1124 */ 1125 void 1126 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 1127 { 1128 struct ixl_mac_filter *f, *tmp; 1129 struct ixl_pf *pf; 1130 device_t dev; 1131 struct ixl_ftl_head to_add; 1132 int to_add_cnt; 1133 1134 pf = vsi->back; 1135 dev = pf->dev; 1136 to_add_cnt = 1; 1137 1138 ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n", 1139 MAC_FORMAT_ARGS(macaddr), vlan); 1140 1141 /* Does one already exist */ 1142 f = ixl_find_filter(&vsi->ftl, macaddr, vlan); 1143 if (f != NULL) 1144 return; 1145 1146 LIST_INIT(&to_add); 1147 f = ixl_new_filter(&to_add, macaddr, vlan); 1148 if (f == NULL) { 1149 device_printf(dev, "WARNING: no filter available!!\n"); 1150 return; 1151 } 1152 if (f->vlan != IXL_VLAN_ANY) 1153 f->flags |= IXL_FILTER_VLAN; 1154 else 1155 vsi->num_macs++; 1156 1157 /* 1158 ** Is this the first vlan being registered, if so we 1159 ** need to remove the ANY filter that indicates we are 1160 ** not in a vlan, and replace that with a 0 filter. 1161 */ 1162 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { 1163 tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY); 1164 if (tmp != NULL) { 1165 struct ixl_ftl_head to_del; 1166 1167 /* Prepare new filter first to avoid removing 1168 * VLAN_ANY filter if allocation fails */ 1169 f = ixl_new_filter(&to_add, macaddr, 0); 1170 if (f == NULL) { 1171 device_printf(dev, "WARNING: no filter available!!\n"); 1172 free(LIST_FIRST(&to_add), M_IXL); 1173 return; 1174 } 1175 to_add_cnt++; 1176 1177 LIST_REMOVE(tmp, ftle); 1178 LIST_INIT(&to_del); 1179 LIST_INSERT_HEAD(&to_del, tmp, ftle); 1180 ixl_del_hw_filters(vsi, &to_del, 1); 1181 } 1182 } 1183 1184 ixl_add_hw_filters(vsi, &to_add, to_add_cnt); 1185 } 1186 1187 /** 1188 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs 1189 * @vsi: pointer to VSI 1190 * @macaddr: MAC address 1191 * 1192 * Adds MAC/VLAN filter for each VLAN configured on the interface 1193 * if there is enough HW filters. Otherwise adds a single filter 1194 * for all tagged and untagged frames to allow all configured VLANs 1195 * to recieve traffic. 1196 */ 1197 void 1198 ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr) 1199 { 1200 struct ixl_ftl_head to_add; 1201 struct ixl_mac_filter *f; 1202 int to_add_cnt = 0; 1203 int i, vlan = 0; 1204 1205 if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) { 1206 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); 1207 return; 1208 } 1209 LIST_INIT(&to_add); 1210 1211 /* Add filter for untagged frames if it does not exist yet */ 1212 f = ixl_find_filter(&vsi->ftl, macaddr, 0); 1213 if (f == NULL) { 1214 f = ixl_new_filter(&to_add, macaddr, 0); 1215 if (f == NULL) { 1216 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 1217 return; 1218 } 1219 to_add_cnt++; 1220 } 1221 1222 for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) { 1223 bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan); 1224 if (vlan == -1) 1225 break; 1226 1227 /* Does one already exist */ 1228 f = ixl_find_filter(&vsi->ftl, macaddr, vlan); 1229 if (f != NULL) 1230 continue; 1231 1232 f = ixl_new_filter(&to_add, macaddr, vlan); 1233 if (f == NULL) { 1234 device_printf(vsi->dev, "WARNING: no filter available!!\n"); 1235 ixl_free_filters(&to_add); 1236 return; 1237 } 1238 to_add_cnt++; 1239 } 1240 1241 ixl_add_hw_filters(vsi, &to_add, to_add_cnt); 1242 } 1243 1244 void 1245 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) 1246 { 1247 struct ixl_mac_filter *f, *tmp; 1248 struct ixl_ftl_head ftl_head; 1249 int to_del_cnt = 1; 1250 1251 ixl_dbg_filter((struct ixl_pf *)vsi->back, 1252 "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n", 1253 MAC_FORMAT_ARGS(macaddr), vlan); 1254 1255 f = ixl_find_filter(&vsi->ftl, macaddr, vlan); 1256 if (f == NULL) 1257 return; 1258 1259 LIST_REMOVE(f, ftle); 1260 LIST_INIT(&ftl_head); 1261 LIST_INSERT_HEAD(&ftl_head, f, ftle); 1262 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0) 1263 vsi->num_macs--; 1264 1265 /* If this is not the last vlan just remove the filter */ 1266 if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) { 1267 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt); 1268 return; 1269 } 1270 1271 /* It's the last vlan, we need to switch back to a non-vlan filter */ 1272 tmp = ixl_find_filter(&vsi->ftl, macaddr, 0); 1273 if (tmp != NULL) { 1274 LIST_REMOVE(tmp, ftle); 1275 LIST_INSERT_AFTER(f, tmp, ftle); 1276 to_del_cnt++; 1277 } 1278 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt); 1279 1280 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); 1281 } 1282 1283 /** 1284 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC 1285 * @vsi: VSI which filters need to be removed 1286 * @macaddr: MAC address 1287 * 1288 * Remove all MAC/VLAN filters with a given MAC address. For multicast 1289 * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY) 1290 * so skip them to speed up processing. Those filters should be removed 1291 * using ixl_del_filter function. 1292 */ 1293 void 1294 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr) 1295 { 1296 struct ixl_mac_filter *f, *tmp; 1297 struct ixl_ftl_head to_del; 1298 int to_del_cnt = 0; 1299 1300 LIST_INIT(&to_del); 1301 1302 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) { 1303 if ((f->flags & IXL_FILTER_MC) != 0 || 1304 !ixl_ether_is_equal(f->macaddr, macaddr)) 1305 continue; 1306 1307 LIST_REMOVE(f, ftle); 1308 LIST_INSERT_HEAD(&to_del, f, ftle); 1309 to_del_cnt++; 1310 } 1311 1312 ixl_dbg_filter((struct ixl_pf *)vsi->back, 1313 "%s: " MAC_FORMAT ", to_del_cnt: %d\n", 1314 __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt); 1315 if (to_del_cnt > 0) 1316 ixl_del_hw_filters(vsi, &to_del, to_del_cnt); 1317 } 1318 1319 /* 1320 ** Find the filter with both matching mac addr and vlan id 1321 */ 1322 struct ixl_mac_filter * 1323 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan) 1324 { 1325 struct ixl_mac_filter *f; 1326 1327 LIST_FOREACH(f, headp, ftle) { 1328 if (ixl_ether_is_equal(f->macaddr, macaddr) && 1329 (f->vlan == vlan)) { 1330 return (f); 1331 } 1332 } 1333 1334 return (NULL); 1335 } 1336 1337 /* 1338 ** This routine takes additions to the vsi filter 1339 ** table and creates an Admin Queue call to create 1340 ** the filters in the hardware. 1341 */ 1342 void 1343 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt) 1344 { 1345 struct i40e_aqc_add_macvlan_element_data *a, *b; 1346 struct ixl_mac_filter *f, *fn; 1347 struct ixl_pf *pf; 1348 struct i40e_hw *hw; 1349 device_t dev; 1350 enum i40e_status_code status; 1351 int j = 0; 1352 1353 pf = vsi->back; 1354 dev = vsi->dev; 1355 hw = &pf->hw; 1356 1357 ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt); 1358 1359 if (cnt < 1) { 1360 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n"); 1361 return; 1362 } 1363 1364 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, 1365 M_IXL, M_NOWAIT | M_ZERO); 1366 if (a == NULL) { 1367 device_printf(dev, "add_hw_filters failed to get memory\n"); 1368 return; 1369 } 1370 1371 LIST_FOREACH(f, to_add, ftle) { 1372 b = &a[j]; // a pox on fvl long names :) 1373 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); 1374 if (f->vlan == IXL_VLAN_ANY) { 1375 b->vlan_tag = 0; 1376 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; 1377 } else { 1378 b->vlan_tag = f->vlan; 1379 b->flags = 0; 1380 } 1381 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; 1382 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n", 1383 MAC_FORMAT_ARGS(f->macaddr)); 1384 1385 if (++j == cnt) 1386 break; 1387 } 1388 if (j != cnt) { 1389 /* Something went wrong */ 1390 device_printf(dev, 1391 "%s ERROR: list of filters to short expected: %d, found: %d\n", 1392 __func__, cnt, j); 1393 ixl_free_filters(to_add); 1394 goto out_free; 1395 } 1396 1397 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); 1398 if (status == I40E_SUCCESS) { 1399 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle); 1400 vsi->num_hw_filters += j; 1401 goto out_free; 1402 } 1403 1404 device_printf(dev, 1405 "i40e_aq_add_macvlan status %s, error %s\n", 1406 i40e_stat_str(hw, status), 1407 i40e_aq_str(hw, hw->aq.asq_last_status)); 1408 j = 0; 1409 1410 /* Verify which filters were actually configured in HW 1411 * and add them to the list */ 1412 LIST_FOREACH_SAFE(f, to_add, ftle, fn) { 1413 LIST_REMOVE(f, ftle); 1414 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) { 1415 ixl_dbg_filter(pf, 1416 "%s filter " MAC_FORMAT " VTAG: %d not added\n", 1417 __func__, 1418 MAC_FORMAT_ARGS(f->macaddr), 1419 f->vlan); 1420 free(f, M_IXL); 1421 } else { 1422 LIST_INSERT_HEAD(&vsi->ftl, f, ftle); 1423 vsi->num_hw_filters++; 1424 } 1425 j++; 1426 } 1427 1428 out_free: 1429 free(a, M_IXL); 1430 } 1431 1432 /* 1433 ** This routine takes removals in the vsi filter 1434 ** table and creates an Admin Queue call to delete 1435 ** the filters in the hardware. 1436 */ 1437 void 1438 ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt) 1439 { 1440 struct i40e_aqc_remove_macvlan_element_data *d, *e; 1441 struct ixl_pf *pf; 1442 struct i40e_hw *hw; 1443 device_t dev; 1444 struct ixl_mac_filter *f, *f_temp; 1445 enum i40e_status_code status; 1446 int j = 0; 1447 1448 pf = vsi->back; 1449 hw = &pf->hw; 1450 dev = vsi->dev; 1451 1452 ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt); 1453 1454 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, 1455 M_IXL, M_NOWAIT | M_ZERO); 1456 if (d == NULL) { 1457 device_printf(dev, "%s: failed to get memory\n", __func__); 1458 return; 1459 } 1460 1461 LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) { 1462 e = &d[j]; // a pox on fvl long names :) 1463 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); 1464 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 1465 if (f->vlan == IXL_VLAN_ANY) { 1466 e->vlan_tag = 0; 1467 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 1468 } else { 1469 e->vlan_tag = f->vlan; 1470 } 1471 1472 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n", 1473 MAC_FORMAT_ARGS(f->macaddr)); 1474 1475 /* delete entry from the list */ 1476 LIST_REMOVE(f, ftle); 1477 free(f, M_IXL); 1478 if (++j == cnt) 1479 break; 1480 } 1481 if (j != cnt || !LIST_EMPTY(to_del)) { 1482 /* Something went wrong */ 1483 device_printf(dev, 1484 "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n", 1485 __func__, cnt, j); 1486 ixl_free_filters(to_del); 1487 goto out_free; 1488 } 1489 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); 1490 if (status) { 1491 device_printf(dev, 1492 "%s: i40e_aq_remove_macvlan status %s, error %s\n", 1493 __func__, i40e_stat_str(hw, status), 1494 i40e_aq_str(hw, hw->aq.asq_last_status)); 1495 for (int i = 0; i < j; i++) { 1496 if (d[i].error_code == 0) 1497 continue; 1498 device_printf(dev, 1499 "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n", 1500 __func__, MAC_FORMAT_ARGS(d[i].mac_addr), 1501 d[i].vlan_tag); 1502 } 1503 } 1504 1505 vsi->num_hw_filters -= j; 1506 1507 out_free: 1508 free(d, M_IXL); 1509 1510 ixl_dbg_filter(pf, "%s: end\n", __func__); 1511 } 1512 1513 int 1514 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1515 { 1516 struct i40e_hw *hw = &pf->hw; 1517 int error = 0; 1518 u32 reg; 1519 u16 pf_qidx; 1520 1521 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1522 1523 ixl_dbg(pf, IXL_DBG_EN_DIS, 1524 "Enabling PF TX ring %4d / VSI TX ring %4d...\n", 1525 pf_qidx, vsi_qidx); 1526 1527 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE); 1528 1529 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1530 reg |= I40E_QTX_ENA_QENA_REQ_MASK | 1531 I40E_QTX_ENA_QENA_STAT_MASK; 1532 wr32(hw, I40E_QTX_ENA(pf_qidx), reg); 1533 /* Verify the enable took */ 1534 for (int j = 0; j < 10; j++) { 1535 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1536 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) 1537 break; 1538 i40e_usec_delay(10); 1539 } 1540 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { 1541 device_printf(pf->dev, "TX queue %d still disabled!\n", 1542 pf_qidx); 1543 error = ETIMEDOUT; 1544 } 1545 1546 return (error); 1547 } 1548 1549 int 1550 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1551 { 1552 struct i40e_hw *hw = &pf->hw; 1553 int error = 0; 1554 u32 reg; 1555 u16 pf_qidx; 1556 1557 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1558 1559 ixl_dbg(pf, IXL_DBG_EN_DIS, 1560 "Enabling PF RX ring %4d / VSI RX ring %4d...\n", 1561 pf_qidx, vsi_qidx); 1562 1563 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1564 reg |= I40E_QRX_ENA_QENA_REQ_MASK | 1565 I40E_QRX_ENA_QENA_STAT_MASK; 1566 wr32(hw, I40E_QRX_ENA(pf_qidx), reg); 1567 /* Verify the enable took */ 1568 for (int j = 0; j < 10; j++) { 1569 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1570 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) 1571 break; 1572 i40e_usec_delay(10); 1573 } 1574 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { 1575 device_printf(pf->dev, "RX queue %d still disabled!\n", 1576 pf_qidx); 1577 error = ETIMEDOUT; 1578 } 1579 1580 return (error); 1581 } 1582 1583 int 1584 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1585 { 1586 int error = 0; 1587 1588 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx); 1589 /* Called function already prints error message */ 1590 if (error) 1591 return (error); 1592 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx); 1593 return (error); 1594 } 1595 1596 /* 1597 * Returns error on first ring that is detected hung. 1598 */ 1599 int 1600 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1601 { 1602 struct i40e_hw *hw = &pf->hw; 1603 int error = 0; 1604 u32 reg; 1605 u16 pf_qidx; 1606 1607 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1608 1609 ixl_dbg(pf, IXL_DBG_EN_DIS, 1610 "Disabling PF TX ring %4d / VSI TX ring %4d...\n", 1611 pf_qidx, vsi_qidx); 1612 1613 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE); 1614 i40e_usec_delay(500); 1615 1616 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1617 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 1618 wr32(hw, I40E_QTX_ENA(pf_qidx), reg); 1619 /* Verify the disable took */ 1620 for (int j = 0; j < 10; j++) { 1621 reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); 1622 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) 1623 break; 1624 i40e_msec_delay(10); 1625 } 1626 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { 1627 device_printf(pf->dev, "TX queue %d still enabled!\n", 1628 pf_qidx); 1629 error = ETIMEDOUT; 1630 } 1631 1632 return (error); 1633 } 1634 1635 /* 1636 * Returns error on first ring that is detected hung. 1637 */ 1638 int 1639 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1640 { 1641 struct i40e_hw *hw = &pf->hw; 1642 int error = 0; 1643 u32 reg; 1644 u16 pf_qidx; 1645 1646 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); 1647 1648 ixl_dbg(pf, IXL_DBG_EN_DIS, 1649 "Disabling PF RX ring %4d / VSI RX ring %4d...\n", 1650 pf_qidx, vsi_qidx); 1651 1652 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1653 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; 1654 wr32(hw, I40E_QRX_ENA(pf_qidx), reg); 1655 /* Verify the disable took */ 1656 for (int j = 0; j < 10; j++) { 1657 reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); 1658 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) 1659 break; 1660 i40e_msec_delay(10); 1661 } 1662 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { 1663 device_printf(pf->dev, "RX queue %d still enabled!\n", 1664 pf_qidx); 1665 error = ETIMEDOUT; 1666 } 1667 1668 return (error); 1669 } 1670 1671 int 1672 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) 1673 { 1674 int error = 0; 1675 1676 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx); 1677 /* Called function already prints error message */ 1678 if (error) 1679 return (error); 1680 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx); 1681 return (error); 1682 } 1683 1684 static void 1685 ixl_handle_tx_mdd_event(struct ixl_pf *pf) 1686 { 1687 struct i40e_hw *hw = &pf->hw; 1688 device_t dev = pf->dev; 1689 struct ixl_vf *vf; 1690 bool mdd_detected = false; 1691 bool pf_mdd_detected = false; 1692 bool vf_mdd_detected = false; 1693 u16 vf_num, queue; 1694 u8 pf_num, event; 1695 u8 pf_mdet_num, vp_mdet_num; 1696 u32 reg; 1697 1698 /* find what triggered the MDD event */ 1699 reg = rd32(hw, I40E_GL_MDET_TX); 1700 if (reg & I40E_GL_MDET_TX_VALID_MASK) { 1701 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> 1702 I40E_GL_MDET_TX_PF_NUM_SHIFT; 1703 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 1704 I40E_GL_MDET_TX_VF_NUM_SHIFT; 1705 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> 1706 I40E_GL_MDET_TX_EVENT_SHIFT; 1707 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 1708 I40E_GL_MDET_TX_QUEUE_SHIFT; 1709 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 1710 mdd_detected = true; 1711 } 1712 1713 if (!mdd_detected) 1714 return; 1715 1716 reg = rd32(hw, I40E_PF_MDET_TX); 1717 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 1718 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 1719 pf_mdet_num = hw->pf_id; 1720 pf_mdd_detected = true; 1721 } 1722 1723 /* Check if MDD was caused by a VF */ 1724 for (int i = 0; i < pf->num_vfs; i++) { 1725 vf = &(pf->vfs[i]); 1726 reg = rd32(hw, I40E_VP_MDET_TX(i)); 1727 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 1728 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 1729 vp_mdet_num = i; 1730 vf->num_mdd_events++; 1731 vf_mdd_detected = true; 1732 } 1733 } 1734 1735 /* Print out an error message */ 1736 if (vf_mdd_detected && pf_mdd_detected) 1737 device_printf(dev, 1738 "Malicious Driver Detection event %d" 1739 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n", 1740 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num); 1741 else if (vf_mdd_detected && !pf_mdd_detected) 1742 device_printf(dev, 1743 "Malicious Driver Detection event %d" 1744 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n", 1745 event, queue, pf_num, vf_num, vp_mdet_num); 1746 else if (!vf_mdd_detected && pf_mdd_detected) 1747 device_printf(dev, 1748 "Malicious Driver Detection event %d" 1749 " on TX queue %d, pf number %d (PF-%d)\n", 1750 event, queue, pf_num, pf_mdet_num); 1751 /* Theoretically shouldn't happen */ 1752 else 1753 device_printf(dev, 1754 "TX Malicious Driver Detection event (unknown)\n"); 1755 } 1756 1757 static void 1758 ixl_handle_rx_mdd_event(struct ixl_pf *pf) 1759 { 1760 struct i40e_hw *hw = &pf->hw; 1761 device_t dev = pf->dev; 1762 struct ixl_vf *vf; 1763 bool mdd_detected = false; 1764 bool pf_mdd_detected = false; 1765 bool vf_mdd_detected = false; 1766 u16 queue; 1767 u8 pf_num, event; 1768 u8 pf_mdet_num, vp_mdet_num; 1769 u32 reg; 1770 1771 /* 1772 * GL_MDET_RX doesn't contain VF number information, unlike 1773 * GL_MDET_TX. 1774 */ 1775 reg = rd32(hw, I40E_GL_MDET_RX); 1776 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 1777 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 1778 I40E_GL_MDET_RX_FUNCTION_SHIFT; 1779 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> 1780 I40E_GL_MDET_RX_EVENT_SHIFT; 1781 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 1782 I40E_GL_MDET_RX_QUEUE_SHIFT; 1783 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 1784 mdd_detected = true; 1785 } 1786 1787 if (!mdd_detected) 1788 return; 1789 1790 reg = rd32(hw, I40E_PF_MDET_RX); 1791 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 1792 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 1793 pf_mdet_num = hw->pf_id; 1794 pf_mdd_detected = true; 1795 } 1796 1797 /* Check if MDD was caused by a VF */ 1798 for (int i = 0; i < pf->num_vfs; i++) { 1799 vf = &(pf->vfs[i]); 1800 reg = rd32(hw, I40E_VP_MDET_RX(i)); 1801 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 1802 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 1803 vp_mdet_num = i; 1804 vf->num_mdd_events++; 1805 vf_mdd_detected = true; 1806 } 1807 } 1808 1809 /* Print out an error message */ 1810 if (vf_mdd_detected && pf_mdd_detected) 1811 device_printf(dev, 1812 "Malicious Driver Detection event %d" 1813 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n", 1814 event, queue, pf_num, pf_mdet_num, vp_mdet_num); 1815 else if (vf_mdd_detected && !pf_mdd_detected) 1816 device_printf(dev, 1817 "Malicious Driver Detection event %d" 1818 " on RX queue %d, pf number %d, (VF-%d)\n", 1819 event, queue, pf_num, vp_mdet_num); 1820 else if (!vf_mdd_detected && pf_mdd_detected) 1821 device_printf(dev, 1822 "Malicious Driver Detection event %d" 1823 " on RX queue %d, pf number %d (PF-%d)\n", 1824 event, queue, pf_num, pf_mdet_num); 1825 /* Theoretically shouldn't happen */ 1826 else 1827 device_printf(dev, 1828 "RX Malicious Driver Detection event (unknown)\n"); 1829 } 1830 1831 /** 1832 * ixl_handle_mdd_event 1833 * 1834 * Called from interrupt handler to identify possibly malicious vfs 1835 * (But also detects events from the PF, as well) 1836 **/ 1837 void 1838 ixl_handle_mdd_event(struct ixl_pf *pf) 1839 { 1840 struct i40e_hw *hw = &pf->hw; 1841 u32 reg; 1842 1843 /* 1844 * Handle both TX/RX because it's possible they could 1845 * both trigger in the same interrupt. 1846 */ 1847 ixl_handle_tx_mdd_event(pf); 1848 ixl_handle_rx_mdd_event(pf); 1849 1850 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING); 1851 1852 /* re-enable mdd interrupt cause */ 1853 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 1854 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; 1855 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 1856 ixl_flush(hw); 1857 } 1858 1859 void 1860 ixl_enable_intr0(struct i40e_hw *hw) 1861 { 1862 u32 reg; 1863 1864 /* Use IXL_ITR_NONE so ITR isn't updated here */ 1865 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | 1866 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 1867 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); 1868 wr32(hw, I40E_PFINT_DYN_CTL0, reg); 1869 } 1870 1871 void 1872 ixl_disable_intr0(struct i40e_hw *hw) 1873 { 1874 u32 reg; 1875 1876 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; 1877 wr32(hw, I40E_PFINT_DYN_CTL0, reg); 1878 ixl_flush(hw); 1879 } 1880 1881 void 1882 ixl_enable_queue(struct i40e_hw *hw, int id) 1883 { 1884 u32 reg; 1885 1886 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | 1887 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 1888 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); 1889 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); 1890 } 1891 1892 void 1893 ixl_disable_queue(struct i40e_hw *hw, int id) 1894 { 1895 u32 reg; 1896 1897 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; 1898 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); 1899 } 1900 1901 void 1902 ixl_handle_empr_reset(struct ixl_pf *pf) 1903 { 1904 struct ixl_vsi *vsi = &pf->vsi; 1905 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING); 1906 1907 ixl_prepare_for_reset(pf, is_up); 1908 /* 1909 * i40e_pf_reset checks the type of reset and acts 1910 * accordingly. If EMP or Core reset was performed 1911 * doing PF reset is not necessary and it sometimes 1912 * fails. 1913 */ 1914 ixl_pf_reset(pf); 1915 1916 if (!IXL_PF_IN_RECOVERY_MODE(pf) && 1917 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) { 1918 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE); 1919 device_printf(pf->dev, 1920 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 1921 pf->link_up = FALSE; 1922 ixl_update_link_status(pf); 1923 } 1924 1925 ixl_rebuild_hw_structs_after_reset(pf, is_up); 1926 1927 atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING); 1928 } 1929 1930 void 1931 ixl_update_stats_counters(struct ixl_pf *pf) 1932 { 1933 struct i40e_hw *hw = &pf->hw; 1934 struct ixl_vsi *vsi = &pf->vsi; 1935 struct ixl_vf *vf; 1936 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx; 1937 1938 struct i40e_hw_port_stats *nsd = &pf->stats; 1939 struct i40e_hw_port_stats *osd = &pf->stats_offsets; 1940 1941 /* Update hw stats */ 1942 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), 1943 pf->stat_offsets_loaded, 1944 &osd->crc_errors, &nsd->crc_errors); 1945 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), 1946 pf->stat_offsets_loaded, 1947 &osd->illegal_bytes, &nsd->illegal_bytes); 1948 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), 1949 I40E_GLPRT_GORCL(hw->port), 1950 pf->stat_offsets_loaded, 1951 &osd->eth.rx_bytes, &nsd->eth.rx_bytes); 1952 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), 1953 I40E_GLPRT_GOTCL(hw->port), 1954 pf->stat_offsets_loaded, 1955 &osd->eth.tx_bytes, &nsd->eth.tx_bytes); 1956 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), 1957 pf->stat_offsets_loaded, 1958 &osd->eth.rx_discards, 1959 &nsd->eth.rx_discards); 1960 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), 1961 I40E_GLPRT_UPRCL(hw->port), 1962 pf->stat_offsets_loaded, 1963 &osd->eth.rx_unicast, 1964 &nsd->eth.rx_unicast); 1965 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), 1966 I40E_GLPRT_UPTCL(hw->port), 1967 pf->stat_offsets_loaded, 1968 &osd->eth.tx_unicast, 1969 &nsd->eth.tx_unicast); 1970 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), 1971 I40E_GLPRT_MPRCL(hw->port), 1972 pf->stat_offsets_loaded, 1973 &osd->eth.rx_multicast, 1974 &nsd->eth.rx_multicast); 1975 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), 1976 I40E_GLPRT_MPTCL(hw->port), 1977 pf->stat_offsets_loaded, 1978 &osd->eth.tx_multicast, 1979 &nsd->eth.tx_multicast); 1980 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), 1981 I40E_GLPRT_BPRCL(hw->port), 1982 pf->stat_offsets_loaded, 1983 &osd->eth.rx_broadcast, 1984 &nsd->eth.rx_broadcast); 1985 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), 1986 I40E_GLPRT_BPTCL(hw->port), 1987 pf->stat_offsets_loaded, 1988 &osd->eth.tx_broadcast, 1989 &nsd->eth.tx_broadcast); 1990 1991 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), 1992 pf->stat_offsets_loaded, 1993 &osd->tx_dropped_link_down, 1994 &nsd->tx_dropped_link_down); 1995 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), 1996 pf->stat_offsets_loaded, 1997 &osd->mac_local_faults, 1998 &nsd->mac_local_faults); 1999 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), 2000 pf->stat_offsets_loaded, 2001 &osd->mac_remote_faults, 2002 &nsd->mac_remote_faults); 2003 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), 2004 pf->stat_offsets_loaded, 2005 &osd->rx_length_errors, 2006 &nsd->rx_length_errors); 2007 2008 /* Flow control (LFC) stats */ 2009 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), 2010 pf->stat_offsets_loaded, 2011 &osd->link_xon_rx, &nsd->link_xon_rx); 2012 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), 2013 pf->stat_offsets_loaded, 2014 &osd->link_xon_tx, &nsd->link_xon_tx); 2015 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), 2016 pf->stat_offsets_loaded, 2017 &osd->link_xoff_rx, &nsd->link_xoff_rx); 2018 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), 2019 pf->stat_offsets_loaded, 2020 &osd->link_xoff_tx, &nsd->link_xoff_tx); 2021 2022 /* 2023 * For watchdog management we need to know if we have been paused 2024 * during the last interval, so capture that here. 2025 */ 2026 if (pf->stats.link_xoff_rx != prev_link_xoff_rx) 2027 vsi->shared->isc_pause_frames = 1; 2028 2029 /* Packet size stats rx */ 2030 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), 2031 I40E_GLPRT_PRC64L(hw->port), 2032 pf->stat_offsets_loaded, 2033 &osd->rx_size_64, &nsd->rx_size_64); 2034 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), 2035 I40E_GLPRT_PRC127L(hw->port), 2036 pf->stat_offsets_loaded, 2037 &osd->rx_size_127, &nsd->rx_size_127); 2038 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), 2039 I40E_GLPRT_PRC255L(hw->port), 2040 pf->stat_offsets_loaded, 2041 &osd->rx_size_255, &nsd->rx_size_255); 2042 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), 2043 I40E_GLPRT_PRC511L(hw->port), 2044 pf->stat_offsets_loaded, 2045 &osd->rx_size_511, &nsd->rx_size_511); 2046 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), 2047 I40E_GLPRT_PRC1023L(hw->port), 2048 pf->stat_offsets_loaded, 2049 &osd->rx_size_1023, &nsd->rx_size_1023); 2050 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), 2051 I40E_GLPRT_PRC1522L(hw->port), 2052 pf->stat_offsets_loaded, 2053 &osd->rx_size_1522, &nsd->rx_size_1522); 2054 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), 2055 I40E_GLPRT_PRC9522L(hw->port), 2056 pf->stat_offsets_loaded, 2057 &osd->rx_size_big, &nsd->rx_size_big); 2058 2059 /* Packet size stats tx */ 2060 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), 2061 I40E_GLPRT_PTC64L(hw->port), 2062 pf->stat_offsets_loaded, 2063 &osd->tx_size_64, &nsd->tx_size_64); 2064 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), 2065 I40E_GLPRT_PTC127L(hw->port), 2066 pf->stat_offsets_loaded, 2067 &osd->tx_size_127, &nsd->tx_size_127); 2068 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), 2069 I40E_GLPRT_PTC255L(hw->port), 2070 pf->stat_offsets_loaded, 2071 &osd->tx_size_255, &nsd->tx_size_255); 2072 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), 2073 I40E_GLPRT_PTC511L(hw->port), 2074 pf->stat_offsets_loaded, 2075 &osd->tx_size_511, &nsd->tx_size_511); 2076 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), 2077 I40E_GLPRT_PTC1023L(hw->port), 2078 pf->stat_offsets_loaded, 2079 &osd->tx_size_1023, &nsd->tx_size_1023); 2080 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), 2081 I40E_GLPRT_PTC1522L(hw->port), 2082 pf->stat_offsets_loaded, 2083 &osd->tx_size_1522, &nsd->tx_size_1522); 2084 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), 2085 I40E_GLPRT_PTC9522L(hw->port), 2086 pf->stat_offsets_loaded, 2087 &osd->tx_size_big, &nsd->tx_size_big); 2088 2089 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), 2090 pf->stat_offsets_loaded, 2091 &osd->rx_undersize, &nsd->rx_undersize); 2092 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), 2093 pf->stat_offsets_loaded, 2094 &osd->rx_fragments, &nsd->rx_fragments); 2095 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), 2096 pf->stat_offsets_loaded, 2097 &osd->rx_oversize, &nsd->rx_oversize); 2098 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 2099 pf->stat_offsets_loaded, 2100 &osd->rx_jabber, &nsd->rx_jabber); 2101 /* EEE */ 2102 i40e_get_phy_lpi_status(hw, nsd); 2103 2104 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded, 2105 &osd->tx_lpi_count, &nsd->tx_lpi_count, 2106 &osd->rx_lpi_count, &nsd->rx_lpi_count); 2107 2108 pf->stat_offsets_loaded = true; 2109 /* End hw stats */ 2110 2111 /* Update vsi stats */ 2112 ixl_update_vsi_stats(vsi); 2113 2114 for (int i = 0; i < pf->num_vfs; i++) { 2115 vf = &pf->vfs[i]; 2116 if (vf->vf_flags & VF_FLAG_ENABLED) 2117 ixl_update_eth_stats(&pf->vfs[i].vsi); 2118 } 2119 } 2120 2121 /** 2122 * Update VSI-specific ethernet statistics counters. 2123 **/ 2124 void 2125 ixl_update_eth_stats(struct ixl_vsi *vsi) 2126 { 2127 struct ixl_pf *pf = (struct ixl_pf *)vsi->back; 2128 struct i40e_hw *hw = &pf->hw; 2129 struct i40e_eth_stats *es; 2130 struct i40e_eth_stats *oes; 2131 u16 stat_idx = vsi->info.stat_counter_idx; 2132 2133 es = &vsi->eth_stats; 2134 oes = &vsi->eth_stats_offsets; 2135 2136 /* Gather up the stats that the hw collects */ 2137 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), 2138 vsi->stat_offsets_loaded, 2139 &oes->tx_errors, &es->tx_errors); 2140 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), 2141 vsi->stat_offsets_loaded, 2142 &oes->rx_discards, &es->rx_discards); 2143 2144 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), 2145 I40E_GLV_GORCL(stat_idx), 2146 vsi->stat_offsets_loaded, 2147 &oes->rx_bytes, &es->rx_bytes); 2148 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), 2149 I40E_GLV_UPRCL(stat_idx), 2150 vsi->stat_offsets_loaded, 2151 &oes->rx_unicast, &es->rx_unicast); 2152 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), 2153 I40E_GLV_MPRCL(stat_idx), 2154 vsi->stat_offsets_loaded, 2155 &oes->rx_multicast, &es->rx_multicast); 2156 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), 2157 I40E_GLV_BPRCL(stat_idx), 2158 vsi->stat_offsets_loaded, 2159 &oes->rx_broadcast, &es->rx_broadcast); 2160 2161 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), 2162 I40E_GLV_GOTCL(stat_idx), 2163 vsi->stat_offsets_loaded, 2164 &oes->tx_bytes, &es->tx_bytes); 2165 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), 2166 I40E_GLV_UPTCL(stat_idx), 2167 vsi->stat_offsets_loaded, 2168 &oes->tx_unicast, &es->tx_unicast); 2169 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), 2170 I40E_GLV_MPTCL(stat_idx), 2171 vsi->stat_offsets_loaded, 2172 &oes->tx_multicast, &es->tx_multicast); 2173 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), 2174 I40E_GLV_BPTCL(stat_idx), 2175 vsi->stat_offsets_loaded, 2176 &oes->tx_broadcast, &es->tx_broadcast); 2177 vsi->stat_offsets_loaded = true; 2178 } 2179 2180 void 2181 ixl_update_vsi_stats(struct ixl_vsi *vsi) 2182 { 2183 struct ixl_pf *pf; 2184 struct ifnet *ifp; 2185 struct i40e_eth_stats *es; 2186 u64 tx_discards; 2187 2188 struct i40e_hw_port_stats *nsd; 2189 2190 pf = vsi->back; 2191 ifp = vsi->ifp; 2192 es = &vsi->eth_stats; 2193 nsd = &pf->stats; 2194 2195 ixl_update_eth_stats(vsi); 2196 2197 tx_discards = es->tx_discards + nsd->tx_dropped_link_down; 2198 2199 /* Update ifnet stats */ 2200 IXL_SET_IPACKETS(vsi, es->rx_unicast + 2201 es->rx_multicast + 2202 es->rx_broadcast); 2203 IXL_SET_OPACKETS(vsi, es->tx_unicast + 2204 es->tx_multicast + 2205 es->tx_broadcast); 2206 IXL_SET_IBYTES(vsi, es->rx_bytes); 2207 IXL_SET_OBYTES(vsi, es->tx_bytes); 2208 IXL_SET_IMCASTS(vsi, es->rx_multicast); 2209 IXL_SET_OMCASTS(vsi, es->tx_multicast); 2210 2211 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + 2212 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments + 2213 nsd->rx_jabber); 2214 IXL_SET_OERRORS(vsi, es->tx_errors); 2215 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); 2216 IXL_SET_OQDROPS(vsi, tx_discards); 2217 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); 2218 IXL_SET_COLLISIONS(vsi, 0); 2219 } 2220 2221 /** 2222 * Reset all of the stats for the given pf 2223 **/ 2224 void 2225 ixl_pf_reset_stats(struct ixl_pf *pf) 2226 { 2227 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); 2228 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); 2229 pf->stat_offsets_loaded = false; 2230 } 2231 2232 /** 2233 * Resets all stats of the given vsi 2234 **/ 2235 void 2236 ixl_vsi_reset_stats(struct ixl_vsi *vsi) 2237 { 2238 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); 2239 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); 2240 vsi->stat_offsets_loaded = false; 2241 } 2242 2243 /** 2244 * Read and update a 48 bit stat from the hw 2245 * 2246 * Since the device stats are not reset at PFReset, they likely will not 2247 * be zeroed when the driver starts. We'll save the first values read 2248 * and use them as offsets to be subtracted from the raw values in order 2249 * to report stats that count from zero. 2250 **/ 2251 void 2252 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, 2253 bool offset_loaded, u64 *offset, u64 *stat) 2254 { 2255 u64 new_data; 2256 2257 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) 2258 new_data = rd64(hw, loreg); 2259 #else 2260 /* 2261 * Use two rd32's instead of one rd64; FreeBSD versions before 2262 * 10 don't support 64-bit bus reads/writes. 2263 */ 2264 new_data = rd32(hw, loreg); 2265 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; 2266 #endif 2267 2268 if (!offset_loaded) 2269 *offset = new_data; 2270 if (new_data >= *offset) 2271 *stat = new_data - *offset; 2272 else 2273 *stat = (new_data + ((u64)1 << 48)) - *offset; 2274 *stat &= 0xFFFFFFFFFFFFULL; 2275 } 2276 2277 /** 2278 * Read and update a 32 bit stat from the hw 2279 **/ 2280 void 2281 ixl_stat_update32(struct i40e_hw *hw, u32 reg, 2282 bool offset_loaded, u64 *offset, u64 *stat) 2283 { 2284 u32 new_data; 2285 2286 new_data = rd32(hw, reg); 2287 if (!offset_loaded) 2288 *offset = new_data; 2289 if (new_data >= *offset) 2290 *stat = (u32)(new_data - *offset); 2291 else 2292 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); 2293 } 2294 2295 /** 2296 * Add subset of device sysctls safe to use in recovery mode 2297 */ 2298 void 2299 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf) 2300 { 2301 device_t dev = pf->dev; 2302 2303 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2304 struct sysctl_oid_list *ctx_list = 2305 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2306 2307 struct sysctl_oid *debug_node; 2308 struct sysctl_oid_list *debug_list; 2309 2310 SYSCTL_ADD_PROC(ctx, ctx_list, 2311 OID_AUTO, "fw_version", 2312 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2313 ixl_sysctl_show_fw, "A", "Firmware version"); 2314 2315 /* Add sysctls meant to print debug information, but don't list them 2316 * in "sysctl -a" output. */ 2317 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2318 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2319 "Debug Sysctls"); 2320 debug_list = SYSCTL_CHILDREN(debug_node); 2321 2322 SYSCTL_ADD_UINT(ctx, debug_list, 2323 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2324 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2325 2326 SYSCTL_ADD_UINT(ctx, debug_list, 2327 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2328 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2329 2330 SYSCTL_ADD_PROC(ctx, debug_list, 2331 OID_AUTO, "dump_debug_data", 2332 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2333 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2334 2335 SYSCTL_ADD_PROC(ctx, debug_list, 2336 OID_AUTO, "do_pf_reset", 2337 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2338 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2339 2340 SYSCTL_ADD_PROC(ctx, debug_list, 2341 OID_AUTO, "do_core_reset", 2342 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2343 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2344 2345 SYSCTL_ADD_PROC(ctx, debug_list, 2346 OID_AUTO, "do_global_reset", 2347 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2348 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2349 2350 SYSCTL_ADD_PROC(ctx, debug_list, 2351 OID_AUTO, "queue_interrupt_table", 2352 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2353 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2354 } 2355 2356 void 2357 ixl_add_device_sysctls(struct ixl_pf *pf) 2358 { 2359 device_t dev = pf->dev; 2360 struct i40e_hw *hw = &pf->hw; 2361 2362 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 2363 struct sysctl_oid_list *ctx_list = 2364 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2365 2366 struct sysctl_oid *debug_node; 2367 struct sysctl_oid_list *debug_list; 2368 2369 struct sysctl_oid *fec_node; 2370 struct sysctl_oid_list *fec_list; 2371 struct sysctl_oid *eee_node; 2372 struct sysctl_oid_list *eee_list; 2373 2374 /* Set up sysctls */ 2375 SYSCTL_ADD_PROC(ctx, ctx_list, 2376 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2377 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); 2378 2379 SYSCTL_ADD_PROC(ctx, ctx_list, 2380 OID_AUTO, "advertise_speed", 2381 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2382 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); 2383 2384 SYSCTL_ADD_PROC(ctx, ctx_list, 2385 OID_AUTO, "supported_speeds", 2386 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2387 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); 2388 2389 SYSCTL_ADD_PROC(ctx, ctx_list, 2390 OID_AUTO, "current_speed", 2391 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2392 ixl_sysctl_current_speed, "A", "Current Port Speed"); 2393 2394 SYSCTL_ADD_PROC(ctx, ctx_list, 2395 OID_AUTO, "fw_version", 2396 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2397 ixl_sysctl_show_fw, "A", "Firmware version"); 2398 2399 SYSCTL_ADD_PROC(ctx, ctx_list, 2400 OID_AUTO, "unallocated_queues", 2401 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0, 2402 ixl_sysctl_unallocated_queues, "I", 2403 "Queues not allocated to a PF or VF"); 2404 2405 SYSCTL_ADD_PROC(ctx, ctx_list, 2406 OID_AUTO, "tx_itr", 2407 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2408 ixl_sysctl_pf_tx_itr, "I", 2409 "Immediately set TX ITR value for all queues"); 2410 2411 SYSCTL_ADD_PROC(ctx, ctx_list, 2412 OID_AUTO, "rx_itr", 2413 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2414 ixl_sysctl_pf_rx_itr, "I", 2415 "Immediately set RX ITR value for all queues"); 2416 2417 SYSCTL_ADD_INT(ctx, ctx_list, 2418 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, 2419 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); 2420 2421 SYSCTL_ADD_INT(ctx, ctx_list, 2422 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, 2423 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); 2424 2425 /* Add FEC sysctls for 25G adapters */ 2426 if (i40e_is_25G_device(hw->device_id)) { 2427 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2428 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2429 "FEC Sysctls"); 2430 fec_list = SYSCTL_CHILDREN(fec_node); 2431 2432 SYSCTL_ADD_PROC(ctx, fec_list, 2433 OID_AUTO, "fc_ability", 2434 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2435 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); 2436 2437 SYSCTL_ADD_PROC(ctx, fec_list, 2438 OID_AUTO, "rs_ability", 2439 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2440 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); 2441 2442 SYSCTL_ADD_PROC(ctx, fec_list, 2443 OID_AUTO, "fc_requested", 2444 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2445 ixl_sysctl_fec_fc_request, "I", 2446 "FC FEC mode requested on link"); 2447 2448 SYSCTL_ADD_PROC(ctx, fec_list, 2449 OID_AUTO, "rs_requested", 2450 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2451 ixl_sysctl_fec_rs_request, "I", 2452 "RS FEC mode requested on link"); 2453 2454 SYSCTL_ADD_PROC(ctx, fec_list, 2455 OID_AUTO, "auto_fec_enabled", 2456 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0, 2457 ixl_sysctl_fec_auto_enable, "I", 2458 "Let FW decide FEC ability/request modes"); 2459 } 2460 2461 SYSCTL_ADD_PROC(ctx, ctx_list, 2462 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2463 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); 2464 2465 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2466 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 2467 "Energy Efficient Ethernet (EEE) Sysctls"); 2468 eee_list = SYSCTL_CHILDREN(eee_node); 2469 2470 SYSCTL_ADD_PROC(ctx, eee_list, 2471 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 2472 pf, 0, ixl_sysctl_eee_enable, "I", 2473 "Enable Energy Efficient Ethernet (EEE)"); 2474 2475 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status", 2476 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0, 2477 "TX LPI status"); 2478 2479 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status", 2480 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0, 2481 "RX LPI status"); 2482 2483 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count", 2484 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count, 2485 "TX LPI count"); 2486 2487 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count", 2488 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count, 2489 "RX LPI count"); 2490 2491 /* Add sysctls meant to print debug information, but don't list them 2492 * in "sysctl -a" output. */ 2493 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, 2494 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 2495 "Debug Sysctls"); 2496 debug_list = SYSCTL_CHILDREN(debug_node); 2497 2498 SYSCTL_ADD_UINT(ctx, debug_list, 2499 OID_AUTO, "shared_debug_mask", CTLFLAG_RW, 2500 &pf->hw.debug_mask, 0, "Shared code debug message level"); 2501 2502 SYSCTL_ADD_UINT(ctx, debug_list, 2503 OID_AUTO, "core_debug_mask", CTLFLAG_RW, 2504 &pf->dbg_mask, 0, "Non-shared code debug message level"); 2505 2506 SYSCTL_ADD_PROC(ctx, debug_list, 2507 OID_AUTO, "link_status", 2508 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2509 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); 2510 2511 SYSCTL_ADD_PROC(ctx, debug_list, 2512 OID_AUTO, "phy_abilities", 2513 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2514 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); 2515 2516 SYSCTL_ADD_PROC(ctx, debug_list, 2517 OID_AUTO, "filter_list", 2518 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2519 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); 2520 2521 SYSCTL_ADD_PROC(ctx, debug_list, 2522 OID_AUTO, "hw_res_alloc", 2523 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2524 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); 2525 2526 SYSCTL_ADD_PROC(ctx, debug_list, 2527 OID_AUTO, "switch_config", 2528 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2529 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); 2530 2531 SYSCTL_ADD_PROC(ctx, debug_list, 2532 OID_AUTO, "switch_vlans", 2533 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2534 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration"); 2535 2536 SYSCTL_ADD_PROC(ctx, debug_list, 2537 OID_AUTO, "rss_key", 2538 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2539 pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); 2540 2541 SYSCTL_ADD_PROC(ctx, debug_list, 2542 OID_AUTO, "rss_lut", 2543 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2544 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); 2545 2546 SYSCTL_ADD_PROC(ctx, debug_list, 2547 OID_AUTO, "rss_hena", 2548 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2549 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); 2550 2551 SYSCTL_ADD_PROC(ctx, debug_list, 2552 OID_AUTO, "disable_fw_link_management", 2553 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2554 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); 2555 2556 SYSCTL_ADD_PROC(ctx, debug_list, 2557 OID_AUTO, "dump_debug_data", 2558 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2559 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); 2560 2561 SYSCTL_ADD_PROC(ctx, debug_list, 2562 OID_AUTO, "do_pf_reset", 2563 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2564 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); 2565 2566 SYSCTL_ADD_PROC(ctx, debug_list, 2567 OID_AUTO, "do_core_reset", 2568 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2569 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); 2570 2571 SYSCTL_ADD_PROC(ctx, debug_list, 2572 OID_AUTO, "do_global_reset", 2573 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT, 2574 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); 2575 2576 SYSCTL_ADD_PROC(ctx, debug_list, 2577 OID_AUTO, "queue_interrupt_table", 2578 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2579 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 2580 2581 if (pf->has_i2c) { 2582 SYSCTL_ADD_PROC(ctx, debug_list, 2583 OID_AUTO, "read_i2c_byte", 2584 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2585 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); 2586 2587 SYSCTL_ADD_PROC(ctx, debug_list, 2588 OID_AUTO, "write_i2c_byte", 2589 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2590 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); 2591 2592 SYSCTL_ADD_PROC(ctx, debug_list, 2593 OID_AUTO, "read_i2c_diag_data", 2594 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 2595 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); 2596 } 2597 } 2598 2599 /* 2600 * Primarily for finding out how many queues can be assigned to VFs, 2601 * at runtime. 2602 */ 2603 static int 2604 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) 2605 { 2606 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2607 int queues; 2608 2609 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); 2610 2611 return sysctl_handle_int(oidp, NULL, queues, req); 2612 } 2613 2614 static const char * 2615 ixl_link_speed_string(enum i40e_aq_link_speed link_speed) 2616 { 2617 const char * link_speed_str[] = { 2618 "Unknown", 2619 "100 Mbps", 2620 "1 Gbps", 2621 "10 Gbps", 2622 "40 Gbps", 2623 "20 Gbps", 2624 "25 Gbps", 2625 "2.5 Gbps", 2626 "5 Gbps" 2627 }; 2628 int index; 2629 2630 switch (link_speed) { 2631 case I40E_LINK_SPEED_100MB: 2632 index = 1; 2633 break; 2634 case I40E_LINK_SPEED_1GB: 2635 index = 2; 2636 break; 2637 case I40E_LINK_SPEED_10GB: 2638 index = 3; 2639 break; 2640 case I40E_LINK_SPEED_40GB: 2641 index = 4; 2642 break; 2643 case I40E_LINK_SPEED_20GB: 2644 index = 5; 2645 break; 2646 case I40E_LINK_SPEED_25GB: 2647 index = 6; 2648 break; 2649 case I40E_LINK_SPEED_2_5GB: 2650 index = 7; 2651 break; 2652 case I40E_LINK_SPEED_5GB: 2653 index = 8; 2654 break; 2655 case I40E_LINK_SPEED_UNKNOWN: 2656 default: 2657 index = 0; 2658 break; 2659 } 2660 2661 return (link_speed_str[index]); 2662 } 2663 2664 int 2665 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2666 { 2667 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2668 struct i40e_hw *hw = &pf->hw; 2669 int error = 0; 2670 2671 ixl_update_link_status(pf); 2672 2673 error = sysctl_handle_string(oidp, 2674 __DECONST(void *, 2675 ixl_link_speed_string(hw->phy.link_info.link_speed)), 2676 8, req); 2677 2678 return (error); 2679 } 2680 2681 /* 2682 * Converts 8-bit speeds value to and from sysctl flags and 2683 * Admin Queue flags. 2684 */ 2685 static u8 2686 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) 2687 { 2688 #define SPEED_MAP_SIZE 8 2689 static u16 speedmap[SPEED_MAP_SIZE] = { 2690 (I40E_LINK_SPEED_100MB | (0x1 << 8)), 2691 (I40E_LINK_SPEED_1GB | (0x2 << 8)), 2692 (I40E_LINK_SPEED_10GB | (0x4 << 8)), 2693 (I40E_LINK_SPEED_20GB | (0x8 << 8)), 2694 (I40E_LINK_SPEED_25GB | (0x10 << 8)), 2695 (I40E_LINK_SPEED_40GB | (0x20 << 8)), 2696 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)), 2697 (I40E_LINK_SPEED_5GB | (0x80 << 8)), 2698 }; 2699 u8 retval = 0; 2700 2701 for (int i = 0; i < SPEED_MAP_SIZE; i++) { 2702 if (to_aq) 2703 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; 2704 else 2705 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0; 2706 } 2707 2708 return (retval); 2709 } 2710 2711 int 2712 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) 2713 { 2714 struct i40e_hw *hw = &pf->hw; 2715 device_t dev = pf->dev; 2716 struct i40e_aq_get_phy_abilities_resp abilities; 2717 struct i40e_aq_set_phy_config config; 2718 enum i40e_status_code aq_error = 0; 2719 2720 /* Get current capability information */ 2721 aq_error = i40e_aq_get_phy_capabilities(hw, 2722 FALSE, FALSE, &abilities, NULL); 2723 if (aq_error) { 2724 device_printf(dev, 2725 "%s: Error getting phy capabilities %d," 2726 " aq error: %d\n", __func__, aq_error, 2727 hw->aq.asq_last_status); 2728 return (EIO); 2729 } 2730 2731 /* Prepare new config */ 2732 bzero(&config, sizeof(config)); 2733 if (from_aq) 2734 config.link_speed = speeds; 2735 else 2736 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); 2737 config.phy_type = abilities.phy_type; 2738 config.phy_type_ext = abilities.phy_type_ext; 2739 config.abilities = abilities.abilities 2740 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 2741 config.eee_capability = abilities.eee_capability; 2742 config.eeer = abilities.eeer_val; 2743 config.low_power_ctrl = abilities.d3_lpan; 2744 config.fec_config = abilities.fec_cfg_curr_mod_ext_info 2745 & I40E_AQ_PHY_FEC_CONFIG_MASK; 2746 2747 /* Do aq command & restart link */ 2748 aq_error = i40e_aq_set_phy_config(hw, &config, NULL); 2749 if (aq_error) { 2750 device_printf(dev, 2751 "%s: Error setting new phy config %d," 2752 " aq error: %d\n", __func__, aq_error, 2753 hw->aq.asq_last_status); 2754 return (EIO); 2755 } 2756 2757 return (0); 2758 } 2759 2760 /* 2761 ** Supported link speeds 2762 ** Flags: 2763 ** 0x1 - 100 Mb 2764 ** 0x2 - 1G 2765 ** 0x4 - 10G 2766 ** 0x8 - 20G 2767 ** 0x10 - 25G 2768 ** 0x20 - 40G 2769 ** 0x40 - 2.5G 2770 ** 0x80 - 5G 2771 */ 2772 static int 2773 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) 2774 { 2775 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2776 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); 2777 2778 return sysctl_handle_int(oidp, NULL, supported, req); 2779 } 2780 2781 /* 2782 ** Control link advertise speed: 2783 ** Flags: 2784 ** 0x1 - advertise 100 Mb 2785 ** 0x2 - advertise 1G 2786 ** 0x4 - advertise 10G 2787 ** 0x8 - advertise 20G 2788 ** 0x10 - advertise 25G 2789 ** 0x20 - advertise 40G 2790 ** 0x40 - advertise 2.5G 2791 ** 0x80 - advertise 5G 2792 ** 2793 ** Set to 0 to disable link 2794 */ 2795 int 2796 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) 2797 { 2798 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2799 device_t dev = pf->dev; 2800 u8 converted_speeds; 2801 int requested_ls = 0; 2802 int error = 0; 2803 2804 /* Read in new mode */ 2805 requested_ls = pf->advertised_speed; 2806 error = sysctl_handle_int(oidp, &requested_ls, 0, req); 2807 if ((error) || (req->newptr == NULL)) 2808 return (error); 2809 if (IXL_PF_IN_RECOVERY_MODE(pf)) { 2810 device_printf(dev, "Interface is currently in FW recovery mode. " 2811 "Setting advertise speed not supported\n"); 2812 return (EINVAL); 2813 } 2814 2815 /* Error out if bits outside of possible flag range are set */ 2816 if ((requested_ls & ~((u8)0xFF)) != 0) { 2817 device_printf(dev, "Input advertised speed out of range; " 2818 "valid flags are: 0x%02x\n", 2819 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2820 return (EINVAL); 2821 } 2822 2823 /* Check if adapter supports input value */ 2824 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); 2825 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { 2826 device_printf(dev, "Invalid advertised speed; " 2827 "valid flags are: 0x%02x\n", 2828 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); 2829 return (EINVAL); 2830 } 2831 2832 error = ixl_set_advertised_speeds(pf, requested_ls, false); 2833 if (error) 2834 return (error); 2835 2836 pf->advertised_speed = requested_ls; 2837 ixl_update_link_status(pf); 2838 return (0); 2839 } 2840 2841 /* 2842 * Input: bitmap of enum i40e_aq_link_speed 2843 */ 2844 u64 2845 ixl_max_aq_speed_to_value(u8 link_speeds) 2846 { 2847 if (link_speeds & I40E_LINK_SPEED_40GB) 2848 return IF_Gbps(40); 2849 if (link_speeds & I40E_LINK_SPEED_25GB) 2850 return IF_Gbps(25); 2851 if (link_speeds & I40E_LINK_SPEED_20GB) 2852 return IF_Gbps(20); 2853 if (link_speeds & I40E_LINK_SPEED_10GB) 2854 return IF_Gbps(10); 2855 if (link_speeds & I40E_LINK_SPEED_5GB) 2856 return IF_Gbps(5); 2857 if (link_speeds & I40E_LINK_SPEED_2_5GB) 2858 return IF_Mbps(2500); 2859 if (link_speeds & I40E_LINK_SPEED_1GB) 2860 return IF_Gbps(1); 2861 if (link_speeds & I40E_LINK_SPEED_100MB) 2862 return IF_Mbps(100); 2863 else 2864 /* Minimum supported link speed */ 2865 return IF_Mbps(100); 2866 } 2867 2868 /* 2869 ** Get the width and transaction speed of 2870 ** the bus this adapter is plugged into. 2871 */ 2872 void 2873 ixl_get_bus_info(struct ixl_pf *pf) 2874 { 2875 struct i40e_hw *hw = &pf->hw; 2876 device_t dev = pf->dev; 2877 u16 link; 2878 u32 offset, num_ports; 2879 u64 max_speed; 2880 2881 /* Some devices don't use PCIE */ 2882 if (hw->mac.type == I40E_MAC_X722) 2883 return; 2884 2885 /* Read PCI Express Capabilities Link Status Register */ 2886 pci_find_cap(dev, PCIY_EXPRESS, &offset); 2887 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 2888 2889 /* Fill out hw struct with PCIE info */ 2890 i40e_set_pci_config_data(hw, link); 2891 2892 /* Use info to print out bandwidth messages */ 2893 device_printf(dev,"PCI Express Bus: Speed %s %s\n", 2894 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": 2895 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": 2896 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), 2897 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : 2898 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : 2899 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : 2900 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : 2901 ("Unknown")); 2902 2903 /* 2904 * If adapter is in slot with maximum supported speed, 2905 * no warning message needs to be printed out. 2906 */ 2907 if (hw->bus.speed >= i40e_bus_speed_8000 2908 && hw->bus.width >= i40e_bus_width_pcie_x8) 2909 return; 2910 2911 num_ports = bitcount32(hw->func_caps.valid_functions); 2912 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; 2913 2914 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { 2915 device_printf(dev, "PCI-Express bandwidth available" 2916 " for this device may be insufficient for" 2917 " optimal performance.\n"); 2918 device_printf(dev, "Please move the device to a different" 2919 " PCI-e link with more lanes and/or higher" 2920 " transfer rate.\n"); 2921 } 2922 } 2923 2924 static int 2925 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2926 { 2927 struct ixl_pf *pf = (struct ixl_pf *)arg1; 2928 struct i40e_hw *hw = &pf->hw; 2929 struct sbuf *sbuf; 2930 2931 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2932 ixl_nvm_version_str(hw, sbuf); 2933 sbuf_finish(sbuf); 2934 sbuf_delete(sbuf); 2935 2936 return (0); 2937 } 2938 2939 void 2940 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) 2941 { 2942 u8 nvma_ptr = nvma->config & 0xFF; 2943 u8 nvma_flags = (nvma->config & 0xF00) >> 8; 2944 const char * cmd_str; 2945 2946 switch (nvma->command) { 2947 case I40E_NVM_READ: 2948 if (nvma_ptr == 0xF && nvma_flags == 0xF && 2949 nvma->offset == 0 && nvma->data_size == 1) { 2950 device_printf(dev, "NVMUPD: Get Driver Status Command\n"); 2951 return; 2952 } 2953 cmd_str = "READ "; 2954 break; 2955 case I40E_NVM_WRITE: 2956 cmd_str = "WRITE"; 2957 break; 2958 default: 2959 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command); 2960 return; 2961 } 2962 device_printf(dev, 2963 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n", 2964 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size); 2965 } 2966 2967 int 2968 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) 2969 { 2970 struct i40e_hw *hw = &pf->hw; 2971 struct i40e_nvm_access *nvma; 2972 device_t dev = pf->dev; 2973 enum i40e_status_code status = 0; 2974 size_t nvma_size, ifd_len, exp_len; 2975 int err, perrno; 2976 2977 DEBUGFUNC("ixl_handle_nvmupd_cmd"); 2978 2979 /* Sanity checks */ 2980 nvma_size = sizeof(struct i40e_nvm_access); 2981 ifd_len = ifd->ifd_len; 2982 2983 if (ifd_len < nvma_size || 2984 ifd->ifd_data == NULL) { 2985 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", 2986 __func__); 2987 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", 2988 __func__, ifd_len, nvma_size); 2989 device_printf(dev, "%s: data pointer: %p\n", __func__, 2990 ifd->ifd_data); 2991 return (EINVAL); 2992 } 2993 2994 nvma = malloc(ifd_len, M_IXL, M_WAITOK); 2995 err = copyin(ifd->ifd_data, nvma, ifd_len); 2996 if (err) { 2997 device_printf(dev, "%s: Cannot get request from user space\n", 2998 __func__); 2999 free(nvma, M_IXL); 3000 return (err); 3001 } 3002 3003 if (pf->dbg_mask & IXL_DBG_NVMUPD) 3004 ixl_print_nvm_cmd(dev, nvma); 3005 3006 if (IXL_PF_IS_RESETTING(pf)) { 3007 int count = 0; 3008 while (count++ < 100) { 3009 i40e_msec_delay(100); 3010 if (!(IXL_PF_IS_RESETTING(pf))) 3011 break; 3012 } 3013 } 3014 3015 if (IXL_PF_IS_RESETTING(pf)) { 3016 device_printf(dev, 3017 "%s: timeout waiting for EMP reset to finish\n", 3018 __func__); 3019 free(nvma, M_IXL); 3020 return (-EBUSY); 3021 } 3022 3023 if (nvma->data_size < 1 || nvma->data_size > 4096) { 3024 device_printf(dev, 3025 "%s: invalid request, data size not in supported range\n", 3026 __func__); 3027 free(nvma, M_IXL); 3028 return (EINVAL); 3029 } 3030 3031 /* 3032 * Older versions of the NVM update tool don't set ifd_len to the size 3033 * of the entire buffer passed to the ioctl. Check the data_size field 3034 * in the contained i40e_nvm_access struct and ensure everything is 3035 * copied in from userspace. 3036 */ 3037 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */ 3038 3039 if (ifd_len < exp_len) { 3040 ifd_len = exp_len; 3041 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK); 3042 err = copyin(ifd->ifd_data, nvma, ifd_len); 3043 if (err) { 3044 device_printf(dev, "%s: Cannot get request from user space\n", 3045 __func__); 3046 free(nvma, M_IXL); 3047 return (err); 3048 } 3049 } 3050 3051 // TODO: Might need a different lock here 3052 // IXL_PF_LOCK(pf); 3053 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); 3054 // IXL_PF_UNLOCK(pf); 3055 3056 err = copyout(nvma, ifd->ifd_data, ifd_len); 3057 free(nvma, M_IXL); 3058 if (err) { 3059 device_printf(dev, "%s: Cannot return data to user space\n", 3060 __func__); 3061 return (err); 3062 } 3063 3064 /* Let the nvmupdate report errors, show them only when debug is enabled */ 3065 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) 3066 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", 3067 i40e_stat_str(hw, status), perrno); 3068 3069 /* 3070 * -EPERM is actually ERESTART, which the kernel interprets as it needing 3071 * to run this ioctl again. So use -EACCES for -EPERM instead. 3072 */ 3073 if (perrno == -EPERM) 3074 return (-EACCES); 3075 else 3076 return (perrno); 3077 } 3078 3079 int 3080 ixl_find_i2c_interface(struct ixl_pf *pf) 3081 { 3082 struct i40e_hw *hw = &pf->hw; 3083 bool i2c_en, port_matched; 3084 u32 reg; 3085 3086 for (int i = 0; i < 4; i++) { 3087 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); 3088 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); 3089 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) 3090 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) 3091 & BIT(hw->port); 3092 if (i2c_en && port_matched) 3093 return (i); 3094 } 3095 3096 return (-1); 3097 } 3098 3099 static char * 3100 ixl_phy_type_string(u32 bit_pos, bool ext) 3101 { 3102 static char * phy_types_str[32] = { 3103 "SGMII", 3104 "1000BASE-KX", 3105 "10GBASE-KX4", 3106 "10GBASE-KR", 3107 "40GBASE-KR4", 3108 "XAUI", 3109 "XFI", 3110 "SFI", 3111 "XLAUI", 3112 "XLPPI", 3113 "40GBASE-CR4", 3114 "10GBASE-CR1", 3115 "SFP+ Active DA", 3116 "QSFP+ Active DA", 3117 "Reserved (14)", 3118 "Reserved (15)", 3119 "Reserved (16)", 3120 "100BASE-TX", 3121 "1000BASE-T", 3122 "10GBASE-T", 3123 "10GBASE-SR", 3124 "10GBASE-LR", 3125 "10GBASE-SFP+Cu", 3126 "10GBASE-CR1", 3127 "40GBASE-CR4", 3128 "40GBASE-SR4", 3129 "40GBASE-LR4", 3130 "1000BASE-SX", 3131 "1000BASE-LX", 3132 "1000BASE-T Optical", 3133 "20GBASE-KR2", 3134 "Reserved (31)" 3135 }; 3136 static char * ext_phy_types_str[8] = { 3137 "25GBASE-KR", 3138 "25GBASE-CR", 3139 "25GBASE-SR", 3140 "25GBASE-LR", 3141 "25GBASE-AOC", 3142 "25GBASE-ACC", 3143 "2.5GBASE-T", 3144 "5GBASE-T" 3145 }; 3146 3147 if (ext && bit_pos > 7) return "Invalid_Ext"; 3148 if (bit_pos > 31) return "Invalid"; 3149 3150 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; 3151 } 3152 3153 /* TODO: ERJ: I don't this is necessary anymore. */ 3154 int 3155 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) 3156 { 3157 device_t dev = pf->dev; 3158 struct i40e_hw *hw = &pf->hw; 3159 struct i40e_aq_desc desc; 3160 enum i40e_status_code status; 3161 3162 struct i40e_aqc_get_link_status *aq_link_status = 3163 (struct i40e_aqc_get_link_status *)&desc.params.raw; 3164 3165 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 3166 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); 3167 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 3168 if (status) { 3169 device_printf(dev, 3170 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", 3171 __func__, i40e_stat_str(hw, status), 3172 i40e_aq_str(hw, hw->aq.asq_last_status)); 3173 return (EIO); 3174 } 3175 3176 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); 3177 return (0); 3178 } 3179 3180 static char * 3181 ixl_phy_type_string_ls(u8 val) 3182 { 3183 if (val >= 0x1F) 3184 return ixl_phy_type_string(val - 0x1F, true); 3185 else 3186 return ixl_phy_type_string(val, false); 3187 } 3188 3189 static int 3190 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) 3191 { 3192 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3193 device_t dev = pf->dev; 3194 struct sbuf *buf; 3195 int error = 0; 3196 3197 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3198 if (!buf) { 3199 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3200 return (ENOMEM); 3201 } 3202 3203 struct i40e_aqc_get_link_status link_status; 3204 error = ixl_aq_get_link_status(pf, &link_status); 3205 if (error) { 3206 sbuf_delete(buf); 3207 return (error); 3208 } 3209 3210 sbuf_printf(buf, "\n" 3211 "PHY Type : 0x%02x<%s>\n" 3212 "Speed : 0x%02x\n" 3213 "Link info: 0x%02x\n" 3214 "AN info : 0x%02x\n" 3215 "Ext info : 0x%02x\n" 3216 "Loopback : 0x%02x\n" 3217 "Max Frame: %d\n" 3218 "Config : 0x%02x\n" 3219 "Power : 0x%02x", 3220 link_status.phy_type, 3221 ixl_phy_type_string_ls(link_status.phy_type), 3222 link_status.link_speed, 3223 link_status.link_info, 3224 link_status.an_info, 3225 link_status.ext_info, 3226 link_status.loopback, 3227 link_status.max_frame_size, 3228 link_status.config, 3229 link_status.power_desc); 3230 3231 error = sbuf_finish(buf); 3232 if (error) 3233 device_printf(dev, "Error finishing sbuf: %d\n", error); 3234 3235 sbuf_delete(buf); 3236 return (error); 3237 } 3238 3239 static int 3240 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 3241 { 3242 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3243 struct i40e_hw *hw = &pf->hw; 3244 device_t dev = pf->dev; 3245 enum i40e_status_code status; 3246 struct i40e_aq_get_phy_abilities_resp abilities; 3247 struct sbuf *buf; 3248 int error = 0; 3249 3250 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3251 if (!buf) { 3252 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3253 return (ENOMEM); 3254 } 3255 3256 status = i40e_aq_get_phy_capabilities(hw, 3257 FALSE, FALSE, &abilities, NULL); 3258 if (status) { 3259 device_printf(dev, 3260 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 3261 __func__, i40e_stat_str(hw, status), 3262 i40e_aq_str(hw, hw->aq.asq_last_status)); 3263 sbuf_delete(buf); 3264 return (EIO); 3265 } 3266 3267 sbuf_printf(buf, "\n" 3268 "PHY Type : %08x", 3269 abilities.phy_type); 3270 3271 if (abilities.phy_type != 0) { 3272 sbuf_printf(buf, "<"); 3273 for (int i = 0; i < 32; i++) 3274 if ((1 << i) & abilities.phy_type) 3275 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); 3276 sbuf_printf(buf, ">"); 3277 } 3278 3279 sbuf_printf(buf, "\nPHY Ext : %02x", 3280 abilities.phy_type_ext); 3281 3282 if (abilities.phy_type_ext != 0) { 3283 sbuf_printf(buf, "<"); 3284 for (int i = 0; i < 4; i++) 3285 if ((1 << i) & abilities.phy_type_ext) 3286 sbuf_printf(buf, "%s,", 3287 ixl_phy_type_string(i, true)); 3288 sbuf_printf(buf, ">"); 3289 } 3290 3291 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed); 3292 if (abilities.link_speed != 0) { 3293 u8 link_speed; 3294 sbuf_printf(buf, " <"); 3295 for (int i = 0; i < 8; i++) { 3296 link_speed = (1 << i) & abilities.link_speed; 3297 if (link_speed) 3298 sbuf_printf(buf, "%s, ", 3299 ixl_link_speed_string(link_speed)); 3300 } 3301 sbuf_printf(buf, ">"); 3302 } 3303 3304 sbuf_printf(buf, "\n" 3305 "Abilities: %02x\n" 3306 "EEE cap : %04x\n" 3307 "EEER reg : %08x\n" 3308 "D3 Lpan : %02x\n" 3309 "ID : %02x %02x %02x %02x\n" 3310 "ModType : %02x %02x %02x\n" 3311 "ModType E: %01x\n" 3312 "FEC Cfg : %02x\n" 3313 "Ext CC : %02x", 3314 abilities.abilities, abilities.eee_capability, 3315 abilities.eeer_val, abilities.d3_lpan, 3316 abilities.phy_id[0], abilities.phy_id[1], 3317 abilities.phy_id[2], abilities.phy_id[3], 3318 abilities.module_type[0], abilities.module_type[1], 3319 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, 3320 abilities.fec_cfg_curr_mod_ext_info & 0x1F, 3321 abilities.ext_comp_code); 3322 3323 error = sbuf_finish(buf); 3324 if (error) 3325 device_printf(dev, "Error finishing sbuf: %d\n", error); 3326 3327 sbuf_delete(buf); 3328 return (error); 3329 } 3330 3331 static int 3332 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) 3333 { 3334 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3335 struct ixl_vsi *vsi = &pf->vsi; 3336 struct ixl_mac_filter *f; 3337 device_t dev = pf->dev; 3338 int error = 0, ftl_len = 0, ftl_counter = 0; 3339 3340 struct sbuf *buf; 3341 3342 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3343 if (!buf) { 3344 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3345 return (ENOMEM); 3346 } 3347 3348 sbuf_printf(buf, "\n"); 3349 3350 /* Print MAC filters */ 3351 sbuf_printf(buf, "PF Filters:\n"); 3352 LIST_FOREACH(f, &vsi->ftl, ftle) 3353 ftl_len++; 3354 3355 if (ftl_len < 1) 3356 sbuf_printf(buf, "(none)\n"); 3357 else { 3358 LIST_FOREACH(f, &vsi->ftl, ftle) { 3359 sbuf_printf(buf, 3360 MAC_FORMAT ", vlan %4d, flags %#06x", 3361 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3362 /* don't print '\n' for last entry */ 3363 if (++ftl_counter != ftl_len) 3364 sbuf_printf(buf, "\n"); 3365 } 3366 } 3367 3368 #ifdef PCI_IOV 3369 /* TODO: Give each VF its own filter list sysctl */ 3370 struct ixl_vf *vf; 3371 if (pf->num_vfs > 0) { 3372 sbuf_printf(buf, "\n\n"); 3373 for (int i = 0; i < pf->num_vfs; i++) { 3374 vf = &pf->vfs[i]; 3375 if (!(vf->vf_flags & VF_FLAG_ENABLED)) 3376 continue; 3377 3378 vsi = &vf->vsi; 3379 ftl_len = 0, ftl_counter = 0; 3380 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num); 3381 LIST_FOREACH(f, &vsi->ftl, ftle) 3382 ftl_len++; 3383 3384 if (ftl_len < 1) 3385 sbuf_printf(buf, "(none)\n"); 3386 else { 3387 LIST_FOREACH(f, &vsi->ftl, ftle) { 3388 sbuf_printf(buf, 3389 MAC_FORMAT ", vlan %4d, flags %#06x\n", 3390 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); 3391 } 3392 } 3393 } 3394 } 3395 #endif 3396 3397 error = sbuf_finish(buf); 3398 if (error) 3399 device_printf(dev, "Error finishing sbuf: %d\n", error); 3400 sbuf_delete(buf); 3401 3402 return (error); 3403 } 3404 3405 #define IXL_SW_RES_SIZE 0x14 3406 int 3407 ixl_res_alloc_cmp(const void *a, const void *b) 3408 { 3409 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; 3410 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; 3411 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; 3412 3413 return ((int)one->resource_type - (int)two->resource_type); 3414 } 3415 3416 /* 3417 * Longest string length: 25 3418 */ 3419 const char * 3420 ixl_switch_res_type_string(u8 type) 3421 { 3422 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = { 3423 "VEB", 3424 "VSI", 3425 "Perfect Match MAC address", 3426 "S-tag", 3427 "(Reserved)", 3428 "Multicast hash entry", 3429 "Unicast hash entry", 3430 "VLAN", 3431 "VSI List entry", 3432 "(Reserved)", 3433 "VLAN Statistic Pool", 3434 "Mirror Rule", 3435 "Queue Set", 3436 "Inner VLAN Forward filter", 3437 "(Reserved)", 3438 "Inner MAC", 3439 "IP", 3440 "GRE/VN1 Key", 3441 "VN2 Key", 3442 "Tunneling Port" 3443 }; 3444 3445 if (type < IXL_SW_RES_SIZE) 3446 return ixl_switch_res_type_strings[type]; 3447 else 3448 return "(Reserved)"; 3449 } 3450 3451 static int 3452 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) 3453 { 3454 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3455 struct i40e_hw *hw = &pf->hw; 3456 device_t dev = pf->dev; 3457 struct sbuf *buf; 3458 enum i40e_status_code status; 3459 int error = 0; 3460 3461 u8 num_entries; 3462 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; 3463 3464 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3465 if (!buf) { 3466 device_printf(dev, "Could not allocate sbuf for output.\n"); 3467 return (ENOMEM); 3468 } 3469 3470 bzero(resp, sizeof(resp)); 3471 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, 3472 resp, 3473 IXL_SW_RES_SIZE, 3474 NULL); 3475 if (status) { 3476 device_printf(dev, 3477 "%s: get_switch_resource_alloc() error %s, aq error %s\n", 3478 __func__, i40e_stat_str(hw, status), 3479 i40e_aq_str(hw, hw->aq.asq_last_status)); 3480 sbuf_delete(buf); 3481 return (error); 3482 } 3483 3484 /* Sort entries by type for display */ 3485 qsort(resp, num_entries, 3486 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), 3487 &ixl_res_alloc_cmp); 3488 3489 sbuf_cat(buf, "\n"); 3490 sbuf_printf(buf, "# of entries: %d\n", num_entries); 3491 sbuf_printf(buf, 3492 " Type | Guaranteed | Total | Used | Un-allocated\n" 3493 " | (this) | (all) | (this) | (all) \n"); 3494 for (int i = 0; i < num_entries; i++) { 3495 sbuf_printf(buf, 3496 "%25s | %10d %5d %6d %12d", 3497 ixl_switch_res_type_string(resp[i].resource_type), 3498 resp[i].guaranteed, 3499 resp[i].total, 3500 resp[i].used, 3501 resp[i].total_unalloced); 3502 if (i < num_entries - 1) 3503 sbuf_cat(buf, "\n"); 3504 } 3505 3506 error = sbuf_finish(buf); 3507 if (error) 3508 device_printf(dev, "Error finishing sbuf: %d\n", error); 3509 3510 sbuf_delete(buf); 3511 return (error); 3512 } 3513 3514 enum ixl_sw_seid_offset { 3515 IXL_SW_SEID_EMP = 1, 3516 IXL_SW_SEID_MAC_START = 2, 3517 IXL_SW_SEID_MAC_END = 5, 3518 IXL_SW_SEID_PF_START = 16, 3519 IXL_SW_SEID_PF_END = 31, 3520 IXL_SW_SEID_VF_START = 32, 3521 IXL_SW_SEID_VF_END = 159, 3522 }; 3523 3524 /* 3525 * Caller must init and delete sbuf; this function will clear and 3526 * finish it for caller. 3527 * 3528 * Note: The SEID argument only applies for elements defined by FW at 3529 * power-on; these include the EMP, Ports, PFs and VFs. 3530 */ 3531 static char * 3532 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid) 3533 { 3534 sbuf_clear(s); 3535 3536 /* If SEID is in certain ranges, then we can infer the 3537 * mapping of SEID to switch element. 3538 */ 3539 if (seid == IXL_SW_SEID_EMP) { 3540 sbuf_cat(s, "EMP"); 3541 goto out; 3542 } else if (seid >= IXL_SW_SEID_MAC_START && 3543 seid <= IXL_SW_SEID_MAC_END) { 3544 sbuf_printf(s, "MAC %2d", 3545 seid - IXL_SW_SEID_MAC_START); 3546 goto out; 3547 } else if (seid >= IXL_SW_SEID_PF_START && 3548 seid <= IXL_SW_SEID_PF_END) { 3549 sbuf_printf(s, "PF %3d", 3550 seid - IXL_SW_SEID_PF_START); 3551 goto out; 3552 } else if (seid >= IXL_SW_SEID_VF_START && 3553 seid <= IXL_SW_SEID_VF_END) { 3554 sbuf_printf(s, "VF %3d", 3555 seid - IXL_SW_SEID_VF_START); 3556 goto out; 3557 } 3558 3559 switch (element_type) { 3560 case I40E_AQ_SW_ELEM_TYPE_BMC: 3561 sbuf_cat(s, "BMC"); 3562 break; 3563 case I40E_AQ_SW_ELEM_TYPE_PV: 3564 sbuf_cat(s, "PV"); 3565 break; 3566 case I40E_AQ_SW_ELEM_TYPE_VEB: 3567 sbuf_cat(s, "VEB"); 3568 break; 3569 case I40E_AQ_SW_ELEM_TYPE_PA: 3570 sbuf_cat(s, "PA"); 3571 break; 3572 case I40E_AQ_SW_ELEM_TYPE_VSI: 3573 sbuf_printf(s, "VSI"); 3574 break; 3575 default: 3576 sbuf_cat(s, "?"); 3577 break; 3578 } 3579 3580 out: 3581 sbuf_finish(s); 3582 return sbuf_data(s); 3583 } 3584 3585 static int 3586 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b) 3587 { 3588 const struct i40e_aqc_switch_config_element_resp *one, *two; 3589 one = (const struct i40e_aqc_switch_config_element_resp *)a; 3590 two = (const struct i40e_aqc_switch_config_element_resp *)b; 3591 3592 return ((int)one->seid - (int)two->seid); 3593 } 3594 3595 static int 3596 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) 3597 { 3598 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3599 struct i40e_hw *hw = &pf->hw; 3600 device_t dev = pf->dev; 3601 struct sbuf *buf; 3602 struct sbuf *nmbuf; 3603 enum i40e_status_code status; 3604 int error = 0; 3605 u16 next = 0; 3606 u8 aq_buf[I40E_AQ_LARGE_BUF]; 3607 3608 struct i40e_aqc_switch_config_element_resp *elem; 3609 struct i40e_aqc_get_switch_config_resp *sw_config; 3610 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 3611 3612 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3613 if (!buf) { 3614 device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); 3615 return (ENOMEM); 3616 } 3617 3618 status = i40e_aq_get_switch_config(hw, sw_config, 3619 sizeof(aq_buf), &next, NULL); 3620 if (status) { 3621 device_printf(dev, 3622 "%s: aq_get_switch_config() error %s, aq error %s\n", 3623 __func__, i40e_stat_str(hw, status), 3624 i40e_aq_str(hw, hw->aq.asq_last_status)); 3625 sbuf_delete(buf); 3626 return error; 3627 } 3628 if (next) 3629 device_printf(dev, "%s: TODO: get more config with SEID %d\n", 3630 __func__, next); 3631 3632 nmbuf = sbuf_new_auto(); 3633 if (!nmbuf) { 3634 device_printf(dev, "Could not allocate sbuf for name output.\n"); 3635 sbuf_delete(buf); 3636 return (ENOMEM); 3637 } 3638 3639 /* Sort entries by SEID for display */ 3640 qsort(sw_config->element, sw_config->header.num_reported, 3641 sizeof(struct i40e_aqc_switch_config_element_resp), 3642 &ixl_sw_cfg_elem_seid_cmp); 3643 3644 sbuf_cat(buf, "\n"); 3645 /* Assuming <= 255 elements in switch */ 3646 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); 3647 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); 3648 /* Exclude: 3649 * Revision -- all elements are revision 1 for now 3650 */ 3651 sbuf_printf(buf, 3652 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n" 3653 " | | | (uplink)\n"); 3654 for (int i = 0; i < sw_config->header.num_reported; i++) { 3655 elem = &sw_config->element[i]; 3656 3657 // "%4d (%8s) | %8s %8s %#8x", 3658 sbuf_printf(buf, "%4d", elem->seid); 3659 sbuf_cat(buf, " "); 3660 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3661 elem->element_type, elem->seid)); 3662 sbuf_cat(buf, " | "); 3663 sbuf_printf(buf, "%4d", elem->uplink_seid); 3664 sbuf_cat(buf, " "); 3665 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3666 0, elem->uplink_seid)); 3667 sbuf_cat(buf, " | "); 3668 sbuf_printf(buf, "%4d", elem->downlink_seid); 3669 sbuf_cat(buf, " "); 3670 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, 3671 0, elem->downlink_seid)); 3672 sbuf_cat(buf, " | "); 3673 sbuf_printf(buf, "%8d", elem->connection_type); 3674 if (i < sw_config->header.num_reported - 1) 3675 sbuf_cat(buf, "\n"); 3676 } 3677 sbuf_delete(nmbuf); 3678 3679 error = sbuf_finish(buf); 3680 if (error) 3681 device_printf(dev, "Error finishing sbuf: %d\n", error); 3682 3683 sbuf_delete(buf); 3684 3685 return (error); 3686 } 3687 3688 static int 3689 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS) 3690 { 3691 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3692 struct i40e_hw *hw = &pf->hw; 3693 device_t dev = pf->dev; 3694 int requested_vlan = -1; 3695 enum i40e_status_code status = 0; 3696 int error = 0; 3697 3698 error = sysctl_handle_int(oidp, &requested_vlan, 0, req); 3699 if ((error) || (req->newptr == NULL)) 3700 return (error); 3701 3702 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) { 3703 device_printf(dev, "Flags disallow setting of vlans\n"); 3704 return (ENODEV); 3705 } 3706 3707 hw->switch_tag = requested_vlan; 3708 device_printf(dev, 3709 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n", 3710 hw->switch_tag, hw->first_tag, hw->second_tag); 3711 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL); 3712 if (status) { 3713 device_printf(dev, 3714 "%s: aq_set_switch_config() error %s, aq error %s\n", 3715 __func__, i40e_stat_str(hw, status), 3716 i40e_aq_str(hw, hw->aq.asq_last_status)); 3717 return (status); 3718 } 3719 return (0); 3720 } 3721 3722 static int 3723 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) 3724 { 3725 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3726 struct i40e_hw *hw = &pf->hw; 3727 device_t dev = pf->dev; 3728 struct sbuf *buf; 3729 int error = 0; 3730 enum i40e_status_code status; 3731 u32 reg; 3732 3733 struct i40e_aqc_get_set_rss_key_data key_data; 3734 3735 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3736 if (!buf) { 3737 device_printf(dev, "Could not allocate sbuf for output.\n"); 3738 return (ENOMEM); 3739 } 3740 3741 bzero(&key_data, sizeof(key_data)); 3742 3743 sbuf_cat(buf, "\n"); 3744 if (hw->mac.type == I40E_MAC_X722) { 3745 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); 3746 if (status) 3747 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", 3748 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3749 } else { 3750 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { 3751 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); 3752 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); 3753 } 3754 } 3755 3756 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); 3757 3758 error = sbuf_finish(buf); 3759 if (error) 3760 device_printf(dev, "Error finishing sbuf: %d\n", error); 3761 sbuf_delete(buf); 3762 3763 return (error); 3764 } 3765 3766 static void 3767 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) 3768 { 3769 int i, j, k, width; 3770 char c; 3771 3772 if (length < 1 || buf == NULL) return; 3773 3774 int byte_stride = 16; 3775 int lines = length / byte_stride; 3776 int rem = length % byte_stride; 3777 if (rem > 0) 3778 lines++; 3779 3780 for (i = 0; i < lines; i++) { 3781 width = (rem > 0 && i == lines - 1) 3782 ? rem : byte_stride; 3783 3784 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); 3785 3786 for (j = 0; j < width; j++) 3787 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); 3788 3789 if (width < byte_stride) { 3790 for (k = 0; k < (byte_stride - width); k++) 3791 sbuf_printf(sb, " "); 3792 } 3793 3794 if (!text) { 3795 sbuf_printf(sb, "\n"); 3796 continue; 3797 } 3798 3799 for (j = 0; j < width; j++) { 3800 c = (char)buf[i * byte_stride + j]; 3801 if (c < 32 || c > 126) 3802 sbuf_printf(sb, "."); 3803 else 3804 sbuf_printf(sb, "%c", c); 3805 3806 if (j == width - 1) 3807 sbuf_printf(sb, "\n"); 3808 } 3809 } 3810 } 3811 3812 static int 3813 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) 3814 { 3815 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3816 struct i40e_hw *hw = &pf->hw; 3817 device_t dev = pf->dev; 3818 struct sbuf *buf; 3819 int error = 0; 3820 enum i40e_status_code status; 3821 u8 hlut[512]; 3822 u32 reg; 3823 3824 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3825 if (!buf) { 3826 device_printf(dev, "Could not allocate sbuf for output.\n"); 3827 return (ENOMEM); 3828 } 3829 3830 bzero(hlut, sizeof(hlut)); 3831 sbuf_cat(buf, "\n"); 3832 if (hw->mac.type == I40E_MAC_X722) { 3833 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); 3834 if (status) 3835 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", 3836 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 3837 } else { 3838 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { 3839 reg = rd32(hw, I40E_PFQF_HLUT(i)); 3840 bcopy(®, &hlut[i << 2], 4); 3841 } 3842 } 3843 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); 3844 3845 error = sbuf_finish(buf); 3846 if (error) 3847 device_printf(dev, "Error finishing sbuf: %d\n", error); 3848 sbuf_delete(buf); 3849 3850 return (error); 3851 } 3852 3853 static int 3854 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) 3855 { 3856 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3857 struct i40e_hw *hw = &pf->hw; 3858 u64 hena; 3859 3860 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 3861 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 3862 3863 return sysctl_handle_long(oidp, NULL, hena, req); 3864 } 3865 3866 /* 3867 * Sysctl to disable firmware's link management 3868 * 3869 * 1 - Disable link management on this port 3870 * 0 - Re-enable link management 3871 * 3872 * On normal NVMs, firmware manages link by default. 3873 */ 3874 static int 3875 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) 3876 { 3877 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3878 struct i40e_hw *hw = &pf->hw; 3879 device_t dev = pf->dev; 3880 int requested_mode = -1; 3881 enum i40e_status_code status = 0; 3882 int error = 0; 3883 3884 /* Read in new mode */ 3885 error = sysctl_handle_int(oidp, &requested_mode, 0, req); 3886 if ((error) || (req->newptr == NULL)) 3887 return (error); 3888 /* Check for sane value */ 3889 if (requested_mode < 0 || requested_mode > 1) { 3890 device_printf(dev, "Valid modes are 0 or 1\n"); 3891 return (EINVAL); 3892 } 3893 3894 /* Set new mode */ 3895 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); 3896 if (status) { 3897 device_printf(dev, 3898 "%s: Error setting new phy debug mode %s," 3899 " aq error: %s\n", __func__, i40e_stat_str(hw, status), 3900 i40e_aq_str(hw, hw->aq.asq_last_status)); 3901 return (EIO); 3902 } 3903 3904 return (0); 3905 } 3906 3907 /* 3908 * Read some diagnostic data from a (Q)SFP+ module 3909 * 3910 * SFP A2 QSFP Lower Page 3911 * Temperature 96-97 22-23 3912 * Vcc 98-99 26-27 3913 * TX power 102-103 34-35..40-41 3914 * RX power 104-105 50-51..56-57 3915 */ 3916 static int 3917 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 3918 { 3919 struct ixl_pf *pf = (struct ixl_pf *)arg1; 3920 device_t dev = pf->dev; 3921 struct sbuf *sbuf; 3922 int error = 0; 3923 u8 output; 3924 3925 if (req->oldptr == NULL) { 3926 error = SYSCTL_OUT(req, 0, 128); 3927 return (0); 3928 } 3929 3930 error = pf->read_i2c_byte(pf, 0, 0xA0, &output); 3931 if (error) { 3932 device_printf(dev, "Error reading from i2c\n"); 3933 return (error); 3934 } 3935 3936 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 3937 if (output == 0x3) { 3938 /* 3939 * Check for: 3940 * - Internally calibrated data 3941 * - Diagnostic monitoring is implemented 3942 */ 3943 pf->read_i2c_byte(pf, 92, 0xA0, &output); 3944 if (!(output & 0x60)) { 3945 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); 3946 return (0); 3947 } 3948 3949 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3950 3951 for (u8 offset = 96; offset < 100; offset++) { 3952 pf->read_i2c_byte(pf, offset, 0xA2, &output); 3953 sbuf_printf(sbuf, "%02X ", output); 3954 } 3955 for (u8 offset = 102; offset < 106; offset++) { 3956 pf->read_i2c_byte(pf, offset, 0xA2, &output); 3957 sbuf_printf(sbuf, "%02X ", output); 3958 } 3959 } else if (output == 0xD || output == 0x11) { 3960 /* 3961 * QSFP+ modules are always internally calibrated, and must indicate 3962 * what types of diagnostic monitoring are implemented 3963 */ 3964 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3965 3966 for (u8 offset = 22; offset < 24; offset++) { 3967 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3968 sbuf_printf(sbuf, "%02X ", output); 3969 } 3970 for (u8 offset = 26; offset < 28; offset++) { 3971 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3972 sbuf_printf(sbuf, "%02X ", output); 3973 } 3974 /* Read the data from the first lane */ 3975 for (u8 offset = 34; offset < 36; offset++) { 3976 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3977 sbuf_printf(sbuf, "%02X ", output); 3978 } 3979 for (u8 offset = 50; offset < 52; offset++) { 3980 pf->read_i2c_byte(pf, offset, 0xA0, &output); 3981 sbuf_printf(sbuf, "%02X ", output); 3982 } 3983 } else { 3984 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output); 3985 return (0); 3986 } 3987 3988 sbuf_finish(sbuf); 3989 sbuf_delete(sbuf); 3990 3991 return (0); 3992 } 3993 3994 /* 3995 * Sysctl to read a byte from I2C bus. 3996 * 3997 * Input: 32-bit value: 3998 * bits 0-7: device address (0xA0 or 0xA2) 3999 * bits 8-15: offset (0-255) 4000 * bits 16-31: unused 4001 * Output: 8-bit value read 4002 */ 4003 static int 4004 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) 4005 { 4006 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4007 device_t dev = pf->dev; 4008 int input = -1, error = 0; 4009 u8 dev_addr, offset, output; 4010 4011 /* Read in I2C read parameters */ 4012 error = sysctl_handle_int(oidp, &input, 0, req); 4013 if ((error) || (req->newptr == NULL)) 4014 return (error); 4015 /* Validate device address */ 4016 dev_addr = input & 0xFF; 4017 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4018 return (EINVAL); 4019 } 4020 offset = (input >> 8) & 0xFF; 4021 4022 error = pf->read_i2c_byte(pf, offset, dev_addr, &output); 4023 if (error) 4024 return (error); 4025 4026 device_printf(dev, "%02X\n", output); 4027 return (0); 4028 } 4029 4030 /* 4031 * Sysctl to write a byte to the I2C bus. 4032 * 4033 * Input: 32-bit value: 4034 * bits 0-7: device address (0xA0 or 0xA2) 4035 * bits 8-15: offset (0-255) 4036 * bits 16-23: value to write 4037 * bits 24-31: unused 4038 * Output: 8-bit value written 4039 */ 4040 static int 4041 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) 4042 { 4043 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4044 device_t dev = pf->dev; 4045 int input = -1, error = 0; 4046 u8 dev_addr, offset, value; 4047 4048 /* Read in I2C write parameters */ 4049 error = sysctl_handle_int(oidp, &input, 0, req); 4050 if ((error) || (req->newptr == NULL)) 4051 return (error); 4052 /* Validate device address */ 4053 dev_addr = input & 0xFF; 4054 if (dev_addr != 0xA0 && dev_addr != 0xA2) { 4055 return (EINVAL); 4056 } 4057 offset = (input >> 8) & 0xFF; 4058 value = (input >> 16) & 0xFF; 4059 4060 error = pf->write_i2c_byte(pf, offset, dev_addr, value); 4061 if (error) 4062 return (error); 4063 4064 device_printf(dev, "%02X written\n", value); 4065 return (0); 4066 } 4067 4068 static int 4069 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4070 u8 bit_pos, int *is_set) 4071 { 4072 device_t dev = pf->dev; 4073 struct i40e_hw *hw = &pf->hw; 4074 enum i40e_status_code status; 4075 4076 if (IXL_PF_IN_RECOVERY_MODE(pf)) 4077 return (EIO); 4078 4079 status = i40e_aq_get_phy_capabilities(hw, 4080 FALSE, FALSE, abilities, NULL); 4081 if (status) { 4082 device_printf(dev, 4083 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", 4084 __func__, i40e_stat_str(hw, status), 4085 i40e_aq_str(hw, hw->aq.asq_last_status)); 4086 return (EIO); 4087 } 4088 4089 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); 4090 return (0); 4091 } 4092 4093 static int 4094 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, 4095 u8 bit_pos, int set) 4096 { 4097 device_t dev = pf->dev; 4098 struct i40e_hw *hw = &pf->hw; 4099 struct i40e_aq_set_phy_config config; 4100 enum i40e_status_code status; 4101 4102 /* Set new PHY config */ 4103 memset(&config, 0, sizeof(config)); 4104 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); 4105 if (set) 4106 config.fec_config |= bit_pos; 4107 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { 4108 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 4109 config.phy_type = abilities->phy_type; 4110 config.phy_type_ext = abilities->phy_type_ext; 4111 config.link_speed = abilities->link_speed; 4112 config.eee_capability = abilities->eee_capability; 4113 config.eeer = abilities->eeer_val; 4114 config.low_power_ctrl = abilities->d3_lpan; 4115 status = i40e_aq_set_phy_config(hw, &config, NULL); 4116 4117 if (status) { 4118 device_printf(dev, 4119 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", 4120 __func__, i40e_stat_str(hw, status), 4121 i40e_aq_str(hw, hw->aq.asq_last_status)); 4122 return (EIO); 4123 } 4124 } 4125 4126 return (0); 4127 } 4128 4129 static int 4130 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) 4131 { 4132 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4133 int mode, error = 0; 4134 4135 struct i40e_aq_get_phy_abilities_resp abilities; 4136 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); 4137 if (error) 4138 return (error); 4139 /* Read in new mode */ 4140 error = sysctl_handle_int(oidp, &mode, 0, req); 4141 if ((error) || (req->newptr == NULL)) 4142 return (error); 4143 4144 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); 4145 } 4146 4147 static int 4148 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) 4149 { 4150 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4151 int mode, error = 0; 4152 4153 struct i40e_aq_get_phy_abilities_resp abilities; 4154 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); 4155 if (error) 4156 return (error); 4157 /* Read in new mode */ 4158 error = sysctl_handle_int(oidp, &mode, 0, req); 4159 if ((error) || (req->newptr == NULL)) 4160 return (error); 4161 4162 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); 4163 } 4164 4165 static int 4166 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) 4167 { 4168 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4169 int mode, error = 0; 4170 4171 struct i40e_aq_get_phy_abilities_resp abilities; 4172 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); 4173 if (error) 4174 return (error); 4175 /* Read in new mode */ 4176 error = sysctl_handle_int(oidp, &mode, 0, req); 4177 if ((error) || (req->newptr == NULL)) 4178 return (error); 4179 4180 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); 4181 } 4182 4183 static int 4184 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) 4185 { 4186 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4187 int mode, error = 0; 4188 4189 struct i40e_aq_get_phy_abilities_resp abilities; 4190 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); 4191 if (error) 4192 return (error); 4193 /* Read in new mode */ 4194 error = sysctl_handle_int(oidp, &mode, 0, req); 4195 if ((error) || (req->newptr == NULL)) 4196 return (error); 4197 4198 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); 4199 } 4200 4201 static int 4202 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) 4203 { 4204 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4205 int mode, error = 0; 4206 4207 struct i40e_aq_get_phy_abilities_resp abilities; 4208 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); 4209 if (error) 4210 return (error); 4211 /* Read in new mode */ 4212 error = sysctl_handle_int(oidp, &mode, 0, req); 4213 if ((error) || (req->newptr == NULL)) 4214 return (error); 4215 4216 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); 4217 } 4218 4219 static int 4220 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) 4221 { 4222 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4223 struct i40e_hw *hw = &pf->hw; 4224 device_t dev = pf->dev; 4225 struct sbuf *buf; 4226 int error = 0; 4227 enum i40e_status_code status; 4228 4229 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4230 if (!buf) { 4231 device_printf(dev, "Could not allocate sbuf for output.\n"); 4232 return (ENOMEM); 4233 } 4234 4235 u8 *final_buff; 4236 /* This amount is only necessary if reading the entire cluster into memory */ 4237 #define IXL_FINAL_BUFF_SIZE (1280 * 1024) 4238 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT); 4239 if (final_buff == NULL) { 4240 device_printf(dev, "Could not allocate memory for output.\n"); 4241 goto out; 4242 } 4243 int final_buff_len = 0; 4244 4245 u8 cluster_id = 1; 4246 bool more = true; 4247 4248 u8 dump_buf[4096]; 4249 u16 curr_buff_size = 4096; 4250 u8 curr_next_table = 0; 4251 u32 curr_next_index = 0; 4252 4253 u16 ret_buff_size; 4254 u8 ret_next_table; 4255 u32 ret_next_index; 4256 4257 sbuf_cat(buf, "\n"); 4258 4259 while (more) { 4260 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size, 4261 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL); 4262 if (status) { 4263 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n", 4264 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); 4265 goto free_out; 4266 } 4267 4268 /* copy info out of temp buffer */ 4269 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size); 4270 final_buff_len += ret_buff_size; 4271 4272 if (ret_next_table != curr_next_table) { 4273 /* We're done with the current table; we can dump out read data. */ 4274 sbuf_printf(buf, "%d:", curr_next_table); 4275 int bytes_printed = 0; 4276 while (bytes_printed <= final_buff_len) { 4277 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), ""); 4278 bytes_printed += 16; 4279 } 4280 sbuf_cat(buf, "\n"); 4281 4282 /* The entire cluster has been read; we're finished */ 4283 if (ret_next_table == 0xFF) 4284 break; 4285 4286 /* Otherwise clear the output buffer and continue reading */ 4287 bzero(final_buff, IXL_FINAL_BUFF_SIZE); 4288 final_buff_len = 0; 4289 } 4290 4291 if (ret_next_index == 0xFFFFFFFF) 4292 ret_next_index = 0; 4293 4294 bzero(dump_buf, sizeof(dump_buf)); 4295 curr_next_table = ret_next_table; 4296 curr_next_index = ret_next_index; 4297 } 4298 4299 free_out: 4300 free(final_buff, M_IXL); 4301 out: 4302 error = sbuf_finish(buf); 4303 if (error) 4304 device_printf(dev, "Error finishing sbuf: %d\n", error); 4305 sbuf_delete(buf); 4306 4307 return (error); 4308 } 4309 4310 static int 4311 ixl_start_fw_lldp(struct ixl_pf *pf) 4312 { 4313 struct i40e_hw *hw = &pf->hw; 4314 enum i40e_status_code status; 4315 4316 status = i40e_aq_start_lldp(hw, false, NULL); 4317 if (status != I40E_SUCCESS) { 4318 switch (hw->aq.asq_last_status) { 4319 case I40E_AQ_RC_EEXIST: 4320 device_printf(pf->dev, 4321 "FW LLDP agent is already running\n"); 4322 break; 4323 case I40E_AQ_RC_EPERM: 4324 device_printf(pf->dev, 4325 "Device configuration forbids SW from starting " 4326 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII " 4327 "attribute to \"Enabled\" to use this sysctl\n"); 4328 return (EINVAL); 4329 default: 4330 device_printf(pf->dev, 4331 "Starting FW LLDP agent failed: error: %s, %s\n", 4332 i40e_stat_str(hw, status), 4333 i40e_aq_str(hw, hw->aq.asq_last_status)); 4334 return (EINVAL); 4335 } 4336 } 4337 4338 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4339 return (0); 4340 } 4341 4342 static int 4343 ixl_stop_fw_lldp(struct ixl_pf *pf) 4344 { 4345 struct i40e_hw *hw = &pf->hw; 4346 device_t dev = pf->dev; 4347 enum i40e_status_code status; 4348 4349 if (hw->func_caps.npar_enable != 0) { 4350 device_printf(dev, 4351 "Disabling FW LLDP agent is not supported on this device\n"); 4352 return (EINVAL); 4353 } 4354 4355 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) { 4356 device_printf(dev, 4357 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); 4358 return (EINVAL); 4359 } 4360 4361 status = i40e_aq_stop_lldp(hw, true, false, NULL); 4362 if (status != I40E_SUCCESS) { 4363 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) { 4364 device_printf(dev, 4365 "Disabling FW LLDP agent failed: error: %s, %s\n", 4366 i40e_stat_str(hw, status), 4367 i40e_aq_str(hw, hw->aq.asq_last_status)); 4368 return (EINVAL); 4369 } 4370 4371 device_printf(dev, "FW LLDP agent is already stopped\n"); 4372 } 4373 4374 i40e_aq_set_dcb_parameters(hw, true, NULL); 4375 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); 4376 return (0); 4377 } 4378 4379 static int 4380 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) 4381 { 4382 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4383 int state, new_state, error = 0; 4384 4385 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); 4386 4387 /* Read in new mode */ 4388 error = sysctl_handle_int(oidp, &new_state, 0, req); 4389 if ((error) || (req->newptr == NULL)) 4390 return (error); 4391 4392 /* Already in requested state */ 4393 if (new_state == state) 4394 return (error); 4395 4396 if (new_state == 0) 4397 return ixl_stop_fw_lldp(pf); 4398 4399 return ixl_start_fw_lldp(pf); 4400 } 4401 4402 static int 4403 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) 4404 { 4405 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4406 int state, new_state; 4407 int sysctl_handle_status = 0; 4408 enum i40e_status_code cmd_status; 4409 4410 /* Init states' values */ 4411 state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED)); 4412 4413 /* Get requested mode */ 4414 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req); 4415 if ((sysctl_handle_status) || (req->newptr == NULL)) 4416 return (sysctl_handle_status); 4417 4418 /* Check if state has changed */ 4419 if (new_state == state) 4420 return (0); 4421 4422 /* Set new state */ 4423 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state)); 4424 4425 /* Save new state or report error */ 4426 if (!cmd_status) { 4427 if (new_state == 0) 4428 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4429 else 4430 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED); 4431 } else if (cmd_status == I40E_ERR_CONFIG) 4432 return (EPERM); 4433 else 4434 return (EIO); 4435 4436 return (0); 4437 } 4438 4439 int 4440 ixl_attach_get_link_status(struct ixl_pf *pf) 4441 { 4442 struct i40e_hw *hw = &pf->hw; 4443 device_t dev = pf->dev; 4444 int error = 0; 4445 4446 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 4447 (hw->aq.fw_maj_ver < 4)) { 4448 i40e_msec_delay(75); 4449 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); 4450 if (error) { 4451 device_printf(dev, "link restart failed, aq_err=%d\n", 4452 pf->hw.aq.asq_last_status); 4453 return error; 4454 } 4455 } 4456 4457 /* Determine link state */ 4458 hw->phy.get_link_info = TRUE; 4459 i40e_get_link_status(hw, &pf->link_up); 4460 return (0); 4461 } 4462 4463 static int 4464 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS) 4465 { 4466 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4467 int requested = 0, error = 0; 4468 4469 /* Read in new mode */ 4470 error = sysctl_handle_int(oidp, &requested, 0, req); 4471 if ((error) || (req->newptr == NULL)) 4472 return (error); 4473 4474 /* Initiate the PF reset later in the admin task */ 4475 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ); 4476 4477 return (error); 4478 } 4479 4480 static int 4481 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS) 4482 { 4483 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4484 struct i40e_hw *hw = &pf->hw; 4485 int requested = 0, error = 0; 4486 4487 /* Read in new mode */ 4488 error = sysctl_handle_int(oidp, &requested, 0, req); 4489 if ((error) || (req->newptr == NULL)) 4490 return (error); 4491 4492 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); 4493 4494 return (error); 4495 } 4496 4497 static int 4498 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS) 4499 { 4500 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4501 struct i40e_hw *hw = &pf->hw; 4502 int requested = 0, error = 0; 4503 4504 /* Read in new mode */ 4505 error = sysctl_handle_int(oidp, &requested, 0, req); 4506 if ((error) || (req->newptr == NULL)) 4507 return (error); 4508 4509 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK); 4510 4511 return (error); 4512 } 4513 4514 /* 4515 * Print out mapping of TX queue indexes and Rx queue indexes 4516 * to MSI-X vectors. 4517 */ 4518 static int 4519 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS) 4520 { 4521 struct ixl_pf *pf = (struct ixl_pf *)arg1; 4522 struct ixl_vsi *vsi = &pf->vsi; 4523 device_t dev = pf->dev; 4524 struct sbuf *buf; 4525 int error = 0; 4526 4527 struct ixl_rx_queue *rx_que = vsi->rx_queues; 4528 struct ixl_tx_queue *tx_que = vsi->tx_queues; 4529 4530 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4531 if (!buf) { 4532 device_printf(dev, "Could not allocate sbuf for output.\n"); 4533 return (ENOMEM); 4534 } 4535 4536 sbuf_cat(buf, "\n"); 4537 for (int i = 0; i < vsi->num_rx_queues; i++) { 4538 rx_que = &vsi->rx_queues[i]; 4539 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix); 4540 } 4541 for (int i = 0; i < vsi->num_tx_queues; i++) { 4542 tx_que = &vsi->tx_queues[i]; 4543 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix); 4544 } 4545 4546 error = sbuf_finish(buf); 4547 if (error) 4548 device_printf(dev, "Error finishing sbuf: %d\n", error); 4549 sbuf_delete(buf); 4550 4551 return (error); 4552 } 4553