/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

/**
 * @file ice_lib.c
 * @brief Generic device setup and sysctl functions
 *
 * Library of generic device functions not specific to the networking stack.
 *
 * This includes hardware initialization functions, as well as handlers for
 * many of the device sysctls used to probe driver status or tune specific
 * behaviors.
 */

#include "ice_lib.h"
#include "ice_iflib.h"
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/resource.h>
#include <net/if_dl.h>
#include <sys/firmware.h>
#include <sys/priv.h>
#include <sys/limits.h>

/**
 * @var M_ICE
 * @brief main ice driver allocation type
 *
 * malloc(9) allocation type used by the majority of memory allocations in the
 * ice driver.
60 */ 61 MALLOC_DEFINE(M_ICE, "ice", "Intel(R) 100Gb Network Driver lib allocations"); 62 63 /* 64 * Helper function prototypes 65 */ 66 static int ice_get_next_vsi(struct ice_vsi **all_vsi, int size); 67 static void ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx); 68 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type); 69 static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx); 70 static int ice_setup_tx_ctx(struct ice_tx_queue *txq, 71 struct ice_tlan_ctx *tlan_ctx, u16 pf_q); 72 static int ice_setup_rx_ctx(struct ice_rx_queue *rxq); 73 static int ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg); 74 static void ice_free_fltr_list(struct ice_list_head *list); 75 static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 76 const u8 *addr, enum ice_sw_fwd_act_type action); 77 static void ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, 78 struct ice_ctl_q_info *cq); 79 static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_info *e); 80 static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, 81 struct ice_rq_event_info *event); 82 static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf); 83 static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); 84 static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); 85 static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info); 86 static u_int ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, u_int errors); 87 static void ice_add_debug_tunables(struct ice_softc *sc); 88 static void ice_add_debug_sysctls(struct ice_softc *sc); 89 static void ice_vsi_set_rss_params(struct ice_vsi *vsi); 90 static void ice_get_default_rss_key(u8 *seed); 91 static int ice_set_rss_key(struct ice_vsi *vsi); 92 static int ice_set_rss_lut(struct ice_vsi *vsi); 93 static void ice_set_rss_flow_flds(struct ice_vsi *vsi); 94 static void ice_clean_vsi_rss_cfg(struct ice_vsi *vsi); 95 static const char *ice_aq_speed_to_str(struct ice_port_info *pi); 96 static const char *ice_requested_fec_mode(struct ice_port_info *pi); 97 static const char *ice_negotiated_fec_mode(struct ice_port_info *pi); 98 static const char *ice_autoneg_mode(struct ice_port_info *pi); 99 static const char *ice_flowcontrol_mode(struct ice_port_info *pi); 100 static void ice_print_bus_link_data(device_t dev, struct ice_hw *hw); 101 static void ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status); 102 static uint8_t ice_pcie_bandwidth_check(struct ice_softc *sc); 103 static uint64_t ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed); 104 static int ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width); 105 static uint64_t ice_phy_types_to_max_rate(struct ice_port_info *pi); 106 static void ice_add_sysctls_sw_stats(struct ice_vsi *vsi, 107 struct sysctl_ctx_list *ctx, 108 struct sysctl_oid *parent); 109 static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi, 110 enum ice_vsi_type type, int idx, 111 bool dynamic); 112 static void ice_handle_mib_change_event(struct ice_softc *sc, 113 struct ice_rq_event_info *event); 114 static void 115 ice_handle_lan_overflow_event(struct ice_softc *sc, 116 struct ice_rq_event_info *event); 117 static int ice_add_ethertype_to_list(struct ice_vsi *vsi, 118 struct ice_list_head *list, 119 u16 ethertype, u16 direction, 120 enum ice_sw_fwd_act_type action); 121 static void ice_add_rx_lldp_filter(struct ice_softc *sc); 122 static 
void ice_del_rx_lldp_filter(struct ice_softc *sc); 123 static u16 ice_aq_phy_types_to_link_speeds(u64 phy_type_low, 124 u64 phy_type_high); 125 struct ice_phy_data; 126 static int 127 ice_intersect_phy_types_and_speeds(struct ice_softc *sc, 128 struct ice_phy_data *phy_data); 129 static int 130 ice_apply_saved_phy_req_to_cfg(struct ice_softc *sc, 131 struct ice_aqc_set_phy_cfg_data *cfg); 132 static int 133 ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc, 134 struct ice_aqc_set_phy_cfg_data *cfg); 135 static void 136 ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, 137 struct ice_aqc_set_phy_cfg_data *cfg); 138 static void 139 ice_print_ldo_tlv(struct ice_softc *sc, 140 struct ice_link_default_override_tlv *tlv); 141 static void 142 ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, 143 u64 *phy_type_high); 144 static u16 ice_apply_supported_speed_filter(u16 report_speeds); 145 static void 146 ice_handle_health_status_event(struct ice_softc *sc, 147 struct ice_rq_event_info *event); 148 static void 149 ice_print_health_status_string(device_t dev, 150 struct ice_aqc_health_status_elem *elem); 151 152 static int ice_module_init(void); 153 static int ice_module_exit(void); 154 155 /* 156 * package version comparison functions 157 */ 158 static bool pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name); 159 static int pkg_ver_compatible(struct ice_pkg_ver *pkg_ver); 160 161 /* 162 * dynamic sysctl handlers 163 */ 164 static int ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS); 165 static int ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS); 166 static int ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS); 167 static int ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS); 168 static int ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS); 169 static int ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS); 170 static int ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS); 171 static int ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS); 172 static int ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS); 173 static int ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS); 174 static int ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS); 175 static int ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS); 176 static int ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS); 177 static int ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS); 178 static int ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS); 179 static int ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS); 180 static int __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, 181 bool is_phy_type_high); 182 static int ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS); 183 static int ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS); 184 static int ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS); 185 static int ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS); 186 static int ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS); 187 static int ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS); 188 static int ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode); 189 static int ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS); 190 static int ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS); 191 static int ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS); 192 static int ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS); 193 static int ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); 194 static int ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS); 195 static int ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS); 196 static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS); 197 static int 
ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS); 198 199 /** 200 * ice_map_bar - Map PCIe BAR memory 201 * @dev: the PCIe device 202 * @bar: the BAR info structure 203 * @bar_num: PCIe BAR number 204 * 205 * Maps the specified PCIe BAR. Stores the mapping data in struct 206 * ice_bar_info. 207 */ 208 int 209 ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num) 210 { 211 if (bar->res != NULL) { 212 device_printf(dev, "PCI BAR%d already mapped\n", bar_num); 213 return (EDOOFUS); 214 } 215 216 bar->rid = PCIR_BAR(bar_num); 217 bar->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar->rid, 218 RF_ACTIVE); 219 if (!bar->res) { 220 device_printf(dev, "PCI BAR%d mapping failed\n", bar_num); 221 return (ENXIO); 222 } 223 224 bar->tag = rman_get_bustag(bar->res); 225 bar->handle = rman_get_bushandle(bar->res); 226 bar->size = rman_get_size(bar->res); 227 228 return (0); 229 } 230 231 /** 232 * ice_free_bar - Free PCIe BAR memory 233 * @dev: the PCIe device 234 * @bar: the BAR info structure 235 * 236 * Frees the specified PCIe BAR, releasing its resources. 237 */ 238 void 239 ice_free_bar(device_t dev, struct ice_bar_info *bar) 240 { 241 if (bar->res != NULL) 242 bus_release_resource(dev, SYS_RES_MEMORY, bar->rid, bar->res); 243 bar->res = NULL; 244 } 245 246 /** 247 * ice_set_ctrlq_len - Configure ctrlq lengths for a device 248 * @hw: the device hardware structure 249 * 250 * Configures the control queues for the given device, setting up the 251 * specified lengths, prior to initializing hardware. 252 */ 253 void 254 ice_set_ctrlq_len(struct ice_hw *hw) 255 { 256 hw->adminq.num_rq_entries = ICE_AQ_LEN; 257 hw->adminq.num_sq_entries = ICE_AQ_LEN; 258 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 259 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 260 261 hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; 262 hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; 263 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 264 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 265 266 } 267 268 /** 269 * ice_get_next_vsi - Get the next available VSI slot 270 * @all_vsi: the VSI list 271 * @size: the size of the VSI list 272 * 273 * Returns the index to the first available VSI slot. Will return size (one 274 * past the last index) if there are no slots available. 275 */ 276 static int 277 ice_get_next_vsi(struct ice_vsi **all_vsi, int size) 278 { 279 int i; 280 281 for (i = 0; i < size; i++) { 282 if (all_vsi[i] == NULL) 283 return i; 284 } 285 286 return size; 287 } 288 289 /** 290 * ice_setup_vsi_common - Common VSI setup for both dynamic and static VSIs 291 * @sc: the device private softc structure 292 * @vsi: the VSI to setup 293 * @type: the VSI type of the new VSI 294 * @idx: the index in the all_vsi array to use 295 * @dynamic: whether this VSI memory was dynamically allocated 296 * 297 * Perform setup for a VSI that is common to both dynamically allocated VSIs 298 * and the static PF VSI which is embedded in the softc structure. 
 */
static void
ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi,
		     enum ice_vsi_type type, int idx, bool dynamic)
{
	/* Store important values in VSI struct */
	vsi->type = type;
	vsi->sc = sc;
	vsi->idx = idx;
	sc->all_vsi[idx] = vsi;
	vsi->dynamic = dynamic;

	/* Setup the VSI tunables now */
	ice_add_vsi_tunables(vsi, sc->vsi_sysctls);
}

/**
 * ice_alloc_vsi - Allocate a dynamic VSI
 * @sc: device softc structure
 * @type: VSI type
 *
 * Allocates a new dynamic VSI structure and inserts it into the VSI list.
 */
struct ice_vsi *
ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type)
{
	struct ice_vsi *vsi;
	int idx;

	/* Find an open index for a new VSI to be allocated. If the returned
	 * index is >= the num_available_vsi then it means no slot is
	 * available.
	 */
	idx = ice_get_next_vsi(sc->all_vsi, sc->num_available_vsi);
	if (idx >= sc->num_available_vsi) {
		device_printf(sc->dev, "No available VSI slots\n");
		return NULL;
	}

	vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_WAITOK|M_ZERO);
	if (!vsi) {
		device_printf(sc->dev, "Unable to allocate VSI memory\n");
		return NULL;
	}

	ice_setup_vsi_common(sc, vsi, type, idx, true);

	return vsi;
}

/**
 * ice_setup_pf_vsi - Setup the PF VSI
 * @sc: the device private softc
 *
 * Setup the PF VSI structure which is embedded as sc->pf_vsi in the device
 * private softc. Unlike other VSIs, the PF VSI memory is allocated as part of
 * the softc memory, instead of being dynamically allocated at creation.
 */
void
ice_setup_pf_vsi(struct ice_softc *sc)
{
	ice_setup_vsi_common(sc, &sc->pf_vsi, ICE_VSI_PF, 0, false);
}

/**
 * ice_alloc_vsi_qmap
 * @vsi: VSI structure
 * @max_tx_queues: Number of transmit queues to identify
 * @max_rx_queues: Number of receive queues to identify
 *
 * Allocates a max_[t|r]x_queues array of words for the VSI where each
 * word contains the index of the queue it represents. In here, all
 * words are initialized to an index of ICE_INVALID_RES_IDX, indicating
 * all queues for this VSI are not yet assigned an index and thus,
 * not ready for use.
 *
 * Returns an error code on failure.
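 *
 * Note: once a queue manager later assigns real indices, each qmap entry maps
 * a VSI-relative queue number to an absolute PF queue number; e.g. a value of
 * 5 in tx_qmap[0] (illustrative only) means this VSI's first Tx queue is PF
 * Tx queue 5, the index used for registers such as QINT_TQCTL.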
 */
int
ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
		   const int max_rx_queues)
{
	struct ice_softc *sc = vsi->sc;
	int i;

	MPASS(max_tx_queues > 0);
	MPASS(max_rx_queues > 0);

	/* Allocate Tx queue mapping memory */
	if (!(vsi->tx_qmap =
	      (u16 *) malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK))) {
		device_printf(sc->dev, "Unable to allocate Tx qmap memory\n");
		return (ENOMEM);
	}

	/* Allocate Rx queue mapping memory */
	if (!(vsi->rx_qmap =
	      (u16 *) malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK))) {
		device_printf(sc->dev, "Unable to allocate Rx qmap memory\n");
		goto free_tx_qmap;
	}

	/* Mark every queue map as invalid to start with */
	for (i = 0; i < max_tx_queues; i++) {
		vsi->tx_qmap[i] = ICE_INVALID_RES_IDX;
	}
	for (i = 0; i < max_rx_queues; i++) {
		vsi->rx_qmap[i] = ICE_INVALID_RES_IDX;
	}

	return 0;

free_tx_qmap:
	free(vsi->tx_qmap, M_ICE);
	vsi->tx_qmap = NULL;

	return (ENOMEM);
}

/**
 * ice_free_vsi_qmaps - Free the PF qmaps associated with a VSI
 * @vsi: the VSI private structure
 *
 * Frees the PF qmaps associated with the given VSI. Generally this will be
 * called by ice_release_vsi, but may need to be called during attach cleanup,
 * depending on when the qmaps were allocated.
 */
void
ice_free_vsi_qmaps(struct ice_vsi *vsi)
{
	struct ice_softc *sc = vsi->sc;

	if (vsi->tx_qmap) {
		ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap,
				       vsi->num_tx_queues);
		free(vsi->tx_qmap, M_ICE);
		vsi->tx_qmap = NULL;
	}

	if (vsi->rx_qmap) {
		ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap,
				       vsi->num_rx_queues);
		free(vsi->rx_qmap, M_ICE);
		vsi->rx_qmap = NULL;
	}
}

/**
 * ice_set_default_vsi_ctx - Setup default VSI context parameters
 * @ctx: the VSI context to initialize
 *
 * Initialize and prepare a default VSI context for configuring a new VSI.
 */
static void
ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx)
{
	u32 table = 0;

	memset(&ctx->info, 0, sizeof(ctx->info));
	/* VSI will be allocated from shared pool */
	ctx->alloc_from_pool = true;
	/* Enable source pruning by default */
	ctx->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctx->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* Allow all packets untagged/tagged */
	ctx->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
				       ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
				       ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
	/* Show VLAN/UP from packets in Rx descriptors */
	ctx->info.inner_vlan_flags |= ((ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH &
					ICE_AQ_VSI_INNER_VLAN_EMODE_M) >>
					ICE_AQ_VSI_INNER_VLAN_EMODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctx->info.ingress_table = CPU_TO_LE32(table);
	ctx->info.egress_table = CPU_TO_LE32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctx->info.outer_up_table = CPU_TO_LE32(table);
	/* No Outer tag support, so outer_vlan_flags remains zero */
}

/**
 * ice_set_rss_vsi_ctx - Setup VSI context parameters for RSS
 * @ctx: the VSI context to configure
 * @type: the VSI type
 *
 * Configures the VSI context for RSS, based on the VSI type.
 */
static void
ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type)
{
	u8 lut_type, hash_type;

	switch (type) {
	case ICE_VSI_PF:
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		/* Other VSI types do not support RSS */
		return;
	}

	ctx->info.q_opt_rss = (((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
			       ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				ICE_AQ_VSI_Q_OPT_RSS_HASH_M));
}

/**
 * ice_setup_vsi_qmap - Setup the queue mapping for a VSI
 * @vsi: the VSI to configure
 * @ctx: the VSI context to configure
 *
 * Configures the context for the given VSI, setting up how the firmware
 * should map the queues for this VSI.
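 *
 * As a rough illustration (hypothetical sizes): a VSI with 8 contiguous Rx
 * queues whose first absolute queue is 16 ends up with q_mapping[0] = 16,
 * q_mapping[1] = 8, and a TC 0 entry carrying a queue-count exponent of 3,
 * since flsl(8 - 1) == 3 and 2^3 == 8.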
527 */ 528 static int 529 ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 530 { 531 int pow = 0; 532 u16 qmap; 533 534 MPASS(vsi->rx_qmap != NULL); 535 536 /* TODO: 537 * Handle multiple Traffic Classes 538 * Handle scattered queues (for VFs) 539 */ 540 if (vsi->qmap_type != ICE_RESMGR_ALLOC_CONTIGUOUS) 541 return (EOPNOTSUPP); 542 543 ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG); 544 545 ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]); 546 ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues); 547 548 549 /* Calculate the next power-of-2 of number of queues */ 550 if (vsi->num_rx_queues) 551 pow = flsl(vsi->num_rx_queues - 1); 552 553 /* Assign all the queues to traffic class zero */ 554 qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M; 555 ctx->info.tc_mapping[0] = CPU_TO_LE16(qmap); 556 557 return 0; 558 } 559 560 /** 561 * ice_initialize_vsi - Initialize a VSI for use 562 * @vsi: the vsi to initialize 563 * 564 * Initialize a VSI over the adminq and prepare it for operation. 565 */ 566 int 567 ice_initialize_vsi(struct ice_vsi *vsi) 568 { 569 struct ice_vsi_ctx ctx = { 0 }; 570 struct ice_hw *hw = &vsi->sc->hw; 571 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 572 enum ice_status status; 573 int err; 574 575 /* For now, we only have code supporting PF VSIs */ 576 switch (vsi->type) { 577 case ICE_VSI_PF: 578 ctx.flags = ICE_AQ_VSI_TYPE_PF; 579 break; 580 default: 581 return (ENODEV); 582 } 583 584 ice_set_default_vsi_ctx(&ctx); 585 ice_set_rss_vsi_ctx(&ctx, vsi->type); 586 587 /* XXX: VSIs of other types may need different port info? */ 588 ctx.info.sw_id = hw->port_info->sw_id; 589 590 /* Set some RSS parameters based on the VSI type */ 591 ice_vsi_set_rss_params(vsi); 592 593 /* Initialize the Rx queue mapping for this VSI */ 594 err = ice_setup_vsi_qmap(vsi, &ctx); 595 if (err) { 596 return err; 597 } 598 599 /* (Re-)add VSI to HW VSI handle list */ 600 status = ice_add_vsi(hw, vsi->idx, &ctx, NULL); 601 if (status != 0) { 602 device_printf(vsi->sc->dev, 603 "Add VSI AQ call failed, err %s aq_err %s\n", 604 ice_status_str(status), 605 ice_aq_str(hw->adminq.sq_last_status)); 606 return (EIO); 607 } 608 vsi->info = ctx.info; 609 610 /* TODO: DCB traffic class support? */ 611 max_txqs[0] = vsi->num_tx_queues; 612 613 status = ice_cfg_vsi_lan(hw->port_info, vsi->idx, 614 ICE_DFLT_TRAFFIC_CLASS, max_txqs); 615 if (status) { 616 device_printf(vsi->sc->dev, 617 "Failed VSI lan queue config, err %s aq_err %s\n", 618 ice_status_str(status), 619 ice_aq_str(hw->adminq.sq_last_status)); 620 ice_deinit_vsi(vsi); 621 return (ENODEV); 622 } 623 624 /* Reset VSI stats */ 625 ice_reset_vsi_stats(vsi); 626 627 return 0; 628 } 629 630 /** 631 * ice_deinit_vsi - Tell firmware to release resources for a VSI 632 * @vsi: the VSI to release 633 * 634 * Helper function which requests the firmware to release the hardware 635 * resources associated with a given VSI. 636 */ 637 void 638 ice_deinit_vsi(struct ice_vsi *vsi) 639 { 640 struct ice_vsi_ctx ctx = { 0 }; 641 struct ice_softc *sc = vsi->sc; 642 struct ice_hw *hw = &sc->hw; 643 enum ice_status status; 644 645 /* Assert that the VSI pointer matches in the list */ 646 MPASS(vsi == sc->all_vsi[vsi->idx]); 647 648 ctx.info = vsi->info; 649 650 status = ice_rm_vsi_lan_cfg(hw->port_info, vsi->idx); 651 if (status) { 652 /* 653 * This should only fail if the VSI handle is invalid, or if 654 * any of the nodes have leaf nodes which are still in use. 
655 */ 656 device_printf(sc->dev, 657 "Unable to remove scheduler nodes for VSI %d, err %s\n", 658 vsi->idx, ice_status_str(status)); 659 } 660 661 /* Tell firmware to release the VSI resources */ 662 status = ice_free_vsi(hw, vsi->idx, &ctx, false, NULL); 663 if (status != 0) { 664 device_printf(sc->dev, 665 "Free VSI %u AQ call failed, err %s aq_err %s\n", 666 vsi->idx, ice_status_str(status), 667 ice_aq_str(hw->adminq.sq_last_status)); 668 } 669 } 670 671 /** 672 * ice_release_vsi - Release resources associated with a VSI 673 * @vsi: the VSI to release 674 * 675 * Release software and firmware resources associated with a VSI. Release the 676 * queue managers associated with this VSI. Also free the VSI structure memory 677 * if the VSI was allocated dynamically using ice_alloc_vsi(). 678 */ 679 void 680 ice_release_vsi(struct ice_vsi *vsi) 681 { 682 struct ice_softc *sc = vsi->sc; 683 int idx = vsi->idx; 684 685 /* Assert that the VSI pointer matches in the list */ 686 MPASS(vsi == sc->all_vsi[idx]); 687 688 /* Cleanup RSS configuration */ 689 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) 690 ice_clean_vsi_rss_cfg(vsi); 691 692 ice_del_vsi_sysctl_ctx(vsi); 693 694 ice_deinit_vsi(vsi); 695 696 ice_free_vsi_qmaps(vsi); 697 698 if (vsi->dynamic) { 699 free(sc->all_vsi[idx], M_ICE); 700 } 701 702 sc->all_vsi[idx] = NULL; 703 } 704 705 /** 706 * ice_aq_speed_to_rate - Convert AdminQ speed enum to baudrate 707 * @pi: port info data 708 * 709 * Returns the baudrate value for the current link speed of a given port. 710 */ 711 uint64_t 712 ice_aq_speed_to_rate(struct ice_port_info *pi) 713 { 714 switch (pi->phy.link_info.link_speed) { 715 case ICE_AQ_LINK_SPEED_100GB: 716 return IF_Gbps(100); 717 case ICE_AQ_LINK_SPEED_50GB: 718 return IF_Gbps(50); 719 case ICE_AQ_LINK_SPEED_40GB: 720 return IF_Gbps(40); 721 case ICE_AQ_LINK_SPEED_25GB: 722 return IF_Gbps(25); 723 case ICE_AQ_LINK_SPEED_10GB: 724 return IF_Gbps(10); 725 case ICE_AQ_LINK_SPEED_5GB: 726 return IF_Gbps(5); 727 case ICE_AQ_LINK_SPEED_2500MB: 728 return IF_Mbps(2500); 729 case ICE_AQ_LINK_SPEED_1000MB: 730 return IF_Mbps(1000); 731 case ICE_AQ_LINK_SPEED_100MB: 732 return IF_Mbps(100); 733 case ICE_AQ_LINK_SPEED_10MB: 734 return IF_Mbps(10); 735 case ICE_AQ_LINK_SPEED_UNKNOWN: 736 default: 737 /* return 0 if we don't know the link speed */ 738 return 0; 739 } 740 } 741 742 /** 743 * ice_aq_speed_to_str - Convert AdminQ speed enum to string representation 744 * @pi: port info data 745 * 746 * Returns the string representation of the current link speed for a given 747 * port. 
748 */ 749 static const char * 750 ice_aq_speed_to_str(struct ice_port_info *pi) 751 { 752 switch (pi->phy.link_info.link_speed) { 753 case ICE_AQ_LINK_SPEED_100GB: 754 return "100 Gbps"; 755 case ICE_AQ_LINK_SPEED_50GB: 756 return "50 Gbps"; 757 case ICE_AQ_LINK_SPEED_40GB: 758 return "40 Gbps"; 759 case ICE_AQ_LINK_SPEED_25GB: 760 return "25 Gbps"; 761 case ICE_AQ_LINK_SPEED_20GB: 762 return "20 Gbps"; 763 case ICE_AQ_LINK_SPEED_10GB: 764 return "10 Gbps"; 765 case ICE_AQ_LINK_SPEED_5GB: 766 return "5 Gbps"; 767 case ICE_AQ_LINK_SPEED_2500MB: 768 return "2.5 Gbps"; 769 case ICE_AQ_LINK_SPEED_1000MB: 770 return "1 Gbps"; 771 case ICE_AQ_LINK_SPEED_100MB: 772 return "100 Mbps"; 773 case ICE_AQ_LINK_SPEED_10MB: 774 return "10 Mbps"; 775 case ICE_AQ_LINK_SPEED_UNKNOWN: 776 default: 777 return "Unknown speed"; 778 } 779 } 780 781 /** 782 * ice_get_phy_type_low - Get media associated with phy_type_low 783 * @phy_type_low: the low 64bits of phy_type from the AdminQ 784 * 785 * Given the lower 64bits of the phy_type from the hardware, return the 786 * ifm_active bit associated. Return IFM_UNKNOWN when phy_type_low is unknown. 787 * Note that only one of ice_get_phy_type_low or ice_get_phy_type_high should 788 * be called. If phy_type_low is zero, call ice_phy_type_high. 789 */ 790 int 791 ice_get_phy_type_low(uint64_t phy_type_low) 792 { 793 switch (phy_type_low) { 794 case ICE_PHY_TYPE_LOW_100BASE_TX: 795 return IFM_100_TX; 796 case ICE_PHY_TYPE_LOW_100M_SGMII: 797 return IFM_100_SGMII; 798 case ICE_PHY_TYPE_LOW_1000BASE_T: 799 return IFM_1000_T; 800 case ICE_PHY_TYPE_LOW_1000BASE_SX: 801 return IFM_1000_SX; 802 case ICE_PHY_TYPE_LOW_1000BASE_LX: 803 return IFM_1000_LX; 804 case ICE_PHY_TYPE_LOW_1000BASE_KX: 805 return IFM_1000_KX; 806 case ICE_PHY_TYPE_LOW_1G_SGMII: 807 return IFM_1000_SGMII; 808 case ICE_PHY_TYPE_LOW_2500BASE_T: 809 return IFM_2500_T; 810 case ICE_PHY_TYPE_LOW_2500BASE_X: 811 return IFM_2500_X; 812 case ICE_PHY_TYPE_LOW_2500BASE_KX: 813 return IFM_2500_KX; 814 case ICE_PHY_TYPE_LOW_5GBASE_T: 815 return IFM_5000_T; 816 case ICE_PHY_TYPE_LOW_5GBASE_KR: 817 return IFM_5000_KR; 818 case ICE_PHY_TYPE_LOW_10GBASE_T: 819 return IFM_10G_T; 820 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 821 return IFM_10G_TWINAX; 822 case ICE_PHY_TYPE_LOW_10GBASE_SR: 823 return IFM_10G_SR; 824 case ICE_PHY_TYPE_LOW_10GBASE_LR: 825 return IFM_10G_LR; 826 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 827 return IFM_10G_KR; 828 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 829 return IFM_10G_AOC; 830 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 831 return IFM_10G_SFI; 832 case ICE_PHY_TYPE_LOW_25GBASE_T: 833 return IFM_25G_T; 834 case ICE_PHY_TYPE_LOW_25GBASE_CR: 835 return IFM_25G_CR; 836 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 837 return IFM_25G_CR_S; 838 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 839 return IFM_25G_CR1; 840 case ICE_PHY_TYPE_LOW_25GBASE_SR: 841 return IFM_25G_SR; 842 case ICE_PHY_TYPE_LOW_25GBASE_LR: 843 return IFM_25G_LR; 844 case ICE_PHY_TYPE_LOW_25GBASE_KR: 845 return IFM_25G_KR; 846 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 847 return IFM_25G_KR_S; 848 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 849 return IFM_25G_KR1; 850 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 851 return IFM_25G_AOC; 852 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 853 return IFM_25G_AUI; 854 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 855 return IFM_40G_CR4; 856 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 857 return IFM_40G_SR4; 858 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 859 return IFM_40G_LR4; 860 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 861 return IFM_40G_KR4; 862 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 
863 return IFM_40G_XLAUI_AC; 864 case ICE_PHY_TYPE_LOW_40G_XLAUI: 865 return IFM_40G_XLAUI; 866 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 867 return IFM_50G_CR2; 868 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 869 return IFM_50G_SR2; 870 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 871 return IFM_50G_LR2; 872 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 873 return IFM_50G_KR2; 874 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 875 return IFM_50G_LAUI2_AC; 876 case ICE_PHY_TYPE_LOW_50G_LAUI2: 877 return IFM_50G_LAUI2; 878 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 879 return IFM_50G_AUI2_AC; 880 case ICE_PHY_TYPE_LOW_50G_AUI2: 881 return IFM_50G_AUI2; 882 case ICE_PHY_TYPE_LOW_50GBASE_CP: 883 return IFM_50G_CP; 884 case ICE_PHY_TYPE_LOW_50GBASE_SR: 885 return IFM_50G_SR; 886 case ICE_PHY_TYPE_LOW_50GBASE_FR: 887 return IFM_50G_FR; 888 case ICE_PHY_TYPE_LOW_50GBASE_LR: 889 return IFM_50G_LR; 890 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 891 return IFM_50G_KR_PAM4; 892 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 893 return IFM_50G_AUI1_AC; 894 case ICE_PHY_TYPE_LOW_50G_AUI1: 895 return IFM_50G_AUI1; 896 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 897 return IFM_100G_CR4; 898 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 899 return IFM_100G_SR4; 900 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 901 return IFM_100G_LR4; 902 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 903 return IFM_100G_KR4; 904 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 905 return IFM_100G_CAUI4_AC; 906 case ICE_PHY_TYPE_LOW_100G_CAUI4: 907 return IFM_100G_CAUI4; 908 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 909 return IFM_100G_AUI4_AC; 910 case ICE_PHY_TYPE_LOW_100G_AUI4: 911 return IFM_100G_AUI4; 912 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 913 return IFM_100G_CR_PAM4; 914 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 915 return IFM_100G_KR_PAM4; 916 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 917 return IFM_100G_CP2; 918 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 919 return IFM_100G_SR2; 920 case ICE_PHY_TYPE_LOW_100GBASE_DR: 921 return IFM_100G_DR; 922 default: 923 return IFM_UNKNOWN; 924 } 925 } 926 927 /** 928 * ice_get_phy_type_high - Get media associated with phy_type_high 929 * @phy_type_high: the upper 64bits of phy_type from the AdminQ 930 * 931 * Given the upper 64bits of the phy_type from the hardware, return the 932 * ifm_active bit associated. Return IFM_UNKNOWN on an unknown value. Note 933 * that only one of ice_get_phy_type_low or ice_get_phy_type_high should be 934 * called. If phy_type_high is zero, call ice_get_phy_type_low. 935 */ 936 int 937 ice_get_phy_type_high(uint64_t phy_type_high) 938 { 939 switch (phy_type_high) { 940 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 941 return IFM_100G_KR2_PAM4; 942 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 943 return IFM_100G_CAUI2_AC; 944 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 945 return IFM_100G_CAUI2; 946 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 947 return IFM_100G_AUI2_AC; 948 case ICE_PHY_TYPE_HIGH_100G_AUI2: 949 return IFM_100G_AUI2; 950 default: 951 return IFM_UNKNOWN; 952 } 953 } 954 955 /** 956 * ice_phy_types_to_max_rate - Returns port's max supported baudrate 957 * @pi: port info struct 958 * 959 * ice_aq_get_phy_caps() w/ ICE_AQC_REPORT_TOPO_CAP_MEDIA parameter needs 960 * to have been called before this function for it to work. 
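 *
 * The rate table below is indexed by the ICE_PHY_TYPE_LOW_* bit positions,
 * with the ICE_PHY_TYPE_HIGH_* bits mapped just past the 64 low-half entries.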
961 */ 962 static uint64_t 963 ice_phy_types_to_max_rate(struct ice_port_info *pi) 964 { 965 uint64_t phy_low = pi->phy.phy_type_low; 966 uint64_t phy_high = pi->phy.phy_type_high; 967 uint64_t max_rate = 0; 968 int bit; 969 970 /* 971 * These are based on the indices used in the BIT() macros for 972 * ICE_PHY_TYPE_LOW_* 973 */ 974 static const uint64_t phy_rates[] = { 975 IF_Mbps(100), 976 IF_Mbps(100), 977 IF_Gbps(1ULL), 978 IF_Gbps(1ULL), 979 IF_Gbps(1ULL), 980 IF_Gbps(1ULL), 981 IF_Gbps(1ULL), 982 IF_Mbps(2500ULL), 983 IF_Mbps(2500ULL), 984 IF_Mbps(2500ULL), 985 IF_Gbps(5ULL), 986 IF_Gbps(5ULL), 987 IF_Gbps(10ULL), 988 IF_Gbps(10ULL), 989 IF_Gbps(10ULL), 990 IF_Gbps(10ULL), 991 IF_Gbps(10ULL), 992 IF_Gbps(10ULL), 993 IF_Gbps(10ULL), 994 IF_Gbps(25ULL), 995 IF_Gbps(25ULL), 996 IF_Gbps(25ULL), 997 IF_Gbps(25ULL), 998 IF_Gbps(25ULL), 999 IF_Gbps(25ULL), 1000 IF_Gbps(25ULL), 1001 IF_Gbps(25ULL), 1002 IF_Gbps(25ULL), 1003 IF_Gbps(25ULL), 1004 IF_Gbps(25ULL), 1005 IF_Gbps(40ULL), 1006 IF_Gbps(40ULL), 1007 IF_Gbps(40ULL), 1008 IF_Gbps(40ULL), 1009 IF_Gbps(40ULL), 1010 IF_Gbps(40ULL), 1011 IF_Gbps(50ULL), 1012 IF_Gbps(50ULL), 1013 IF_Gbps(50ULL), 1014 IF_Gbps(50ULL), 1015 IF_Gbps(50ULL), 1016 IF_Gbps(50ULL), 1017 IF_Gbps(50ULL), 1018 IF_Gbps(50ULL), 1019 IF_Gbps(50ULL), 1020 IF_Gbps(50ULL), 1021 IF_Gbps(50ULL), 1022 IF_Gbps(50ULL), 1023 IF_Gbps(50ULL), 1024 IF_Gbps(50ULL), 1025 IF_Gbps(50ULL), 1026 IF_Gbps(100ULL), 1027 IF_Gbps(100ULL), 1028 IF_Gbps(100ULL), 1029 IF_Gbps(100ULL), 1030 IF_Gbps(100ULL), 1031 IF_Gbps(100ULL), 1032 IF_Gbps(100ULL), 1033 IF_Gbps(100ULL), 1034 IF_Gbps(100ULL), 1035 IF_Gbps(100ULL), 1036 IF_Gbps(100ULL), 1037 IF_Gbps(100ULL), 1038 IF_Gbps(100ULL), 1039 /* These rates are for ICE_PHY_TYPE_HIGH_* */ 1040 IF_Gbps(100ULL), 1041 IF_Gbps(100ULL), 1042 IF_Gbps(100ULL), 1043 IF_Gbps(100ULL), 1044 IF_Gbps(100ULL) 1045 }; 1046 1047 /* coverity[address_of] */ 1048 for_each_set_bit(bit, &phy_high, 64) 1049 if ((bit + 64) < (int)ARRAY_SIZE(phy_rates)) 1050 max_rate = uqmax(max_rate, phy_rates[(bit + 64)]); 1051 1052 /* coverity[address_of] */ 1053 for_each_set_bit(bit, &phy_low, 64) 1054 max_rate = uqmax(max_rate, phy_rates[bit]); 1055 1056 return (max_rate); 1057 } 1058 1059 /* The if_media type is split over the original 5 bit media variant field, 1060 * along with extended types using up extra bits in the options section. 1061 * We want to convert this split number into a bitmap index, so we reverse the 1062 * calculation of IFM_X here. 1063 */ 1064 #define IFM_IDX(x) (((x) & IFM_TMASK) | \ 1065 (((x) & IFM_ETH_XTYPE) >> IFM_ETH_XSHIFT)) 1066 1067 /** 1068 * ice_add_media_types - Add supported media types to the media structure 1069 * @sc: ice private softc structure 1070 * @media: ifmedia structure to setup 1071 * 1072 * Looks up the supported phy types, and initializes the various media types 1073 * available. 1074 * 1075 * @pre this function must be protected from being called while another thread 1076 * is accessing the ifmedia types. 1077 */ 1078 enum ice_status 1079 ice_add_media_types(struct ice_softc *sc, struct ifmedia *media) 1080 { 1081 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 1082 struct ice_port_info *pi = sc->hw.port_info; 1083 enum ice_status status; 1084 uint64_t phy_low, phy_high; 1085 int bit; 1086 1087 ASSERT_CFG_LOCKED(sc); 1088 1089 /* the maximum possible media type index is 511. We probably don't 1090 * need most of this space, but this ensures future compatibility when 1091 * additional media types are used. 
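	 *
	 * Each discovered type is recorded by its IFM_IDX() bit in the
	 * already_added bitmap below, so multiple PHY types that map to the
	 * same ifmedia word are only added to the list once.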
1092 */ 1093 ice_declare_bitmap(already_added, 511); 1094 1095 /* Remove all previous media types */ 1096 ifmedia_removeall(media); 1097 1098 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 1099 &pcaps, NULL); 1100 if (status != ICE_SUCCESS) { 1101 device_printf(sc->dev, 1102 "%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n", 1103 __func__, ice_status_str(status), 1104 ice_aq_str(sc->hw.adminq.sq_last_status)); 1105 return (status); 1106 } 1107 phy_low = le64toh(pcaps.phy_type_low); 1108 phy_high = le64toh(pcaps.phy_type_high); 1109 1110 /* make sure the added bitmap is zero'd */ 1111 memset(already_added, 0, sizeof(already_added)); 1112 1113 /* coverity[address_of] */ 1114 for_each_set_bit(bit, &phy_low, 64) { 1115 uint64_t type = BIT_ULL(bit); 1116 int ostype; 1117 1118 /* get the OS media type */ 1119 ostype = ice_get_phy_type_low(type); 1120 1121 /* don't bother adding the unknown type */ 1122 if (ostype == IFM_UNKNOWN) 1123 continue; 1124 1125 /* only add each media type to the list once */ 1126 if (ice_is_bit_set(already_added, IFM_IDX(ostype))) 1127 continue; 1128 1129 ifmedia_add(media, IFM_ETHER | ostype, 0, NULL); 1130 ice_set_bit(IFM_IDX(ostype), already_added); 1131 } 1132 1133 /* coverity[address_of] */ 1134 for_each_set_bit(bit, &phy_high, 64) { 1135 uint64_t type = BIT_ULL(bit); 1136 int ostype; 1137 1138 /* get the OS media type */ 1139 ostype = ice_get_phy_type_high(type); 1140 1141 /* don't bother adding the unknown type */ 1142 if (ostype == IFM_UNKNOWN) 1143 continue; 1144 1145 /* only add each media type to the list once */ 1146 if (ice_is_bit_set(already_added, IFM_IDX(ostype))) 1147 continue; 1148 1149 ifmedia_add(media, IFM_ETHER | ostype, 0, NULL); 1150 ice_set_bit(IFM_IDX(ostype), already_added); 1151 } 1152 1153 /* Use autoselect media by default */ 1154 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 1155 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 1156 1157 return (ICE_SUCCESS); 1158 } 1159 1160 /** 1161 * ice_configure_rxq_interrupts - Configure HW Rx queues for MSI-X interrupts 1162 * @vsi: the VSI to configure 1163 * 1164 * Called when setting up MSI-X interrupts to configure the Rx hardware queues. 1165 */ 1166 void 1167 ice_configure_rxq_interrupts(struct ice_vsi *vsi) 1168 { 1169 struct ice_hw *hw = &vsi->sc->hw; 1170 int i; 1171 1172 for (i = 0; i < vsi->num_rx_queues; i++) { 1173 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1174 u32 val; 1175 1176 val = (QINT_RQCTL_CAUSE_ENA_M | 1177 (ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) | 1178 (rxq->irqv->me << QINT_RQCTL_MSIX_INDX_S)); 1179 wr32(hw, QINT_RQCTL(vsi->rx_qmap[rxq->me]), val); 1180 } 1181 1182 ice_flush(hw); 1183 } 1184 1185 /** 1186 * ice_configure_txq_interrupts - Configure HW Tx queues for MSI-X interrupts 1187 * @vsi: the VSI to configure 1188 * 1189 * Called when setting up MSI-X interrupts to configure the Tx hardware queues. 
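 *
 * Each queue's QINT_TQCTL register is programmed with the cause-enable bit,
 * the Tx ITR index, and the MSI-X vector assigned to that queue.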
 */
void
ice_configure_txq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tx_queue *txq = &vsi->tx_queues[i];
		u32 val;

		val = (QINT_TQCTL_CAUSE_ENA_M |
		       (ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) |
		       (txq->irqv->me << QINT_TQCTL_MSIX_INDX_S));
		wr32(hw, QINT_TQCTL(vsi->tx_qmap[txq->me]), val);
	}

	ice_flush(hw);
}

/**
 * ice_flush_rxq_interrupts - Unconfigure Hw Rx queues MSI-X interrupt cause
 * @vsi: the VSI to configure
 *
 * Unset the CAUSE_ENA flag of the RQCTL register for each queue, then trigger
 * a software interrupt on that cause. This is required as part of the Rx
 * queue disable logic to dissociate the Rx queue from the interrupt.
 *
 * Note: this function must be called prior to disabling Rx queues with
 * ice_control_rx_queues, otherwise the Rx queue may not be disabled properly.
 */
void
ice_flush_rxq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_rx_queues; i++) {
		struct ice_rx_queue *rxq = &vsi->rx_queues[i];
		u32 reg, val;

		/* Clear the CAUSE_ENA flag */
		reg = vsi->rx_qmap[rxq->me];
		val = rd32(hw, QINT_RQCTL(reg));
		val &= ~QINT_RQCTL_CAUSE_ENA_M;
		wr32(hw, QINT_RQCTL(reg), val);

		ice_flush(hw);

		/* Trigger a software interrupt to complete interrupt
		 * dissociation.
		 */
		wr32(hw, GLINT_DYN_CTL(rxq->irqv->me),
		     GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
	}
}

/**
 * ice_flush_txq_interrupts - Unconfigure Hw Tx queues MSI-X interrupt cause
 * @vsi: the VSI to configure
 *
 * Unset the CAUSE_ENA flag of the TQCTL register for each queue, then trigger
 * a software interrupt on that cause. This is required as part of the Tx
 * queue disable logic to dissociate the Tx queue from the interrupt.
 *
 * Note: this function must be called prior to ice_vsi_disable_tx, otherwise
 * the Tx queue disable may not complete properly.
 */
void
ice_flush_txq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tx_queue *txq = &vsi->tx_queues[i];
		u32 reg, val;

		/* Clear the CAUSE_ENA flag */
		reg = vsi->tx_qmap[txq->me];
		val = rd32(hw, QINT_TQCTL(reg));
		val &= ~QINT_TQCTL_CAUSE_ENA_M;
		wr32(hw, QINT_TQCTL(reg), val);

		ice_flush(hw);

		/* Trigger a software interrupt to complete interrupt
		 * dissociation.
		 */
		wr32(hw, GLINT_DYN_CTL(txq->irqv->me),
		     GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
	}
}

/**
 * ice_configure_rx_itr - Configure the Rx ITR settings for this VSI
 * @vsi: the VSI to configure
 *
 * Program the hardware ITR registers with the settings for this VSI.
 */
void
ice_configure_rx_itr(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	/* TODO: Handle per-queue/per-vector ITR?
*/ 1297 1298 for (i = 0; i < vsi->num_rx_queues; i++) { 1299 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1300 1301 wr32(hw, GLINT_ITR(ICE_RX_ITR, rxq->irqv->me), 1302 ice_itr_to_reg(hw, vsi->rx_itr)); 1303 } 1304 1305 ice_flush(hw); 1306 } 1307 1308 /** 1309 * ice_configure_tx_itr - Configure the Tx ITR settings for this VSI 1310 * @vsi: the VSI to configure 1311 * 1312 * Program the hardware ITR registers with the settings for this VSI. 1313 */ 1314 void 1315 ice_configure_tx_itr(struct ice_vsi *vsi) 1316 { 1317 struct ice_hw *hw = &vsi->sc->hw; 1318 int i; 1319 1320 /* TODO: Handle per-queue/per-vector ITR? */ 1321 1322 for (i = 0; i < vsi->num_tx_queues; i++) { 1323 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1324 1325 wr32(hw, GLINT_ITR(ICE_TX_ITR, txq->irqv->me), 1326 ice_itr_to_reg(hw, vsi->tx_itr)); 1327 } 1328 1329 ice_flush(hw); 1330 } 1331 1332 /** 1333 * ice_setup_tx_ctx - Setup an ice_tlan_ctx structure for a queue 1334 * @txq: the Tx queue to configure 1335 * @tlan_ctx: the Tx LAN queue context structure to initialize 1336 * @pf_q: real queue number 1337 */ 1338 static int 1339 ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) 1340 { 1341 struct ice_vsi *vsi = txq->vsi; 1342 struct ice_softc *sc = vsi->sc; 1343 struct ice_hw *hw = &sc->hw; 1344 1345 tlan_ctx->port_num = hw->port_info->lport; 1346 1347 /* number of descriptors in the queue */ 1348 tlan_ctx->qlen = txq->desc_count; 1349 1350 /* set the transmit queue base address, defined in 128 byte units */ 1351 tlan_ctx->base = txq->tx_paddr >> 7; 1352 1353 tlan_ctx->pf_num = hw->pf_id; 1354 1355 /* For now, we only have code supporting PF VSIs */ 1356 switch (vsi->type) { 1357 case ICE_VSI_PF: 1358 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 1359 break; 1360 default: 1361 return (ENODEV); 1362 } 1363 1364 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); 1365 1366 /* Enable TSO */ 1367 tlan_ctx->tso_ena = 1; 1368 tlan_ctx->internal_usage_flag = 1; 1369 1370 tlan_ctx->tso_qnum = pf_q; 1371 1372 /* 1373 * Stick with the older legacy Tx queue interface, instead of the new 1374 * advanced queue interface. 1375 */ 1376 tlan_ctx->legacy_int = 1; 1377 1378 /* Descriptor WB mode */ 1379 tlan_ctx->wb_mode = 0; 1380 1381 return (0); 1382 } 1383 1384 /** 1385 * ice_cfg_vsi_for_tx - Configure the hardware for Tx 1386 * @vsi: the VSI to configure 1387 * 1388 * Configure the device Tx queues through firmware AdminQ commands. After 1389 * this, Tx queues will be ready for transmit. 
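 *
 * Roughly, for each Tx queue this builds an ice_tlan_ctx via
 * ice_setup_tx_ctx(), packs it into the AdminQ buffer with ice_set_ctx(),
 * submits it with ice_ena_vsi_txq(), and records the returned TEID used
 * later when the queue is disabled.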
1390 */ 1391 int 1392 ice_cfg_vsi_for_tx(struct ice_vsi *vsi) 1393 { 1394 struct ice_aqc_add_tx_qgrp *qg; 1395 struct ice_hw *hw = &vsi->sc->hw; 1396 device_t dev = vsi->sc->dev; 1397 enum ice_status status; 1398 int i; 1399 int err = 0; 1400 u16 qg_size, pf_q; 1401 1402 qg_size = ice_struct_size(qg, txqs, 1); 1403 qg = (struct ice_aqc_add_tx_qgrp *)malloc(qg_size, M_ICE, M_NOWAIT|M_ZERO); 1404 if (!qg) 1405 return (ENOMEM); 1406 1407 qg->num_txqs = 1; 1408 1409 for (i = 0; i < vsi->num_tx_queues; i++) { 1410 struct ice_tlan_ctx tlan_ctx = { 0 }; 1411 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1412 1413 pf_q = vsi->tx_qmap[txq->me]; 1414 qg->txqs[0].txq_id = htole16(pf_q); 1415 1416 err = ice_setup_tx_ctx(txq, &tlan_ctx, pf_q); 1417 if (err) 1418 goto free_txqg; 1419 1420 ice_set_ctx(hw, (u8 *)&tlan_ctx, qg->txqs[0].txq_ctx, 1421 ice_tlan_ctx_info); 1422 1423 status = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1424 i, 1, qg, qg_size, NULL); 1425 if (status) { 1426 device_printf(dev, 1427 "Failed to set LAN Tx queue context, err %s aq_err %s\n", 1428 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 1429 err = ENODEV; 1430 goto free_txqg; 1431 } 1432 1433 /* Keep track of the Tx queue TEID */ 1434 if (pf_q == le16toh(qg->txqs[0].txq_id)) 1435 txq->q_teid = le32toh(qg->txqs[0].q_teid); 1436 } 1437 1438 free_txqg: 1439 free(qg, M_ICE); 1440 1441 return (err); 1442 } 1443 1444 /** 1445 * ice_setup_rx_ctx - Setup an Rx context structure for a receive queue 1446 * @rxq: the receive queue to program 1447 * 1448 * Setup an Rx queue context structure and program it into the hardware 1449 * registers. This is a necessary step for enabling the Rx queue. 1450 * 1451 * @pre the VSI associated with this queue must have initialized mbuf_sz 1452 */ 1453 static int 1454 ice_setup_rx_ctx(struct ice_rx_queue *rxq) 1455 { 1456 struct ice_rlan_ctx rlan_ctx = {0}; 1457 struct ice_vsi *vsi = rxq->vsi; 1458 struct ice_softc *sc = vsi->sc; 1459 struct ice_hw *hw = &sc->hw; 1460 enum ice_status status; 1461 u32 rxdid = ICE_RXDID_FLEX_NIC; 1462 u32 regval; 1463 u16 pf_q; 1464 1465 pf_q = vsi->rx_qmap[rxq->me]; 1466 1467 /* set the receive queue base address, defined in 128 byte units */ 1468 rlan_ctx.base = rxq->rx_paddr >> 7; 1469 1470 rlan_ctx.qlen = rxq->desc_count; 1471 1472 rlan_ctx.dbuf = vsi->mbuf_sz >> ICE_RLAN_CTX_DBUF_S; 1473 1474 /* use 32 byte descriptors */ 1475 rlan_ctx.dsize = 1; 1476 1477 /* Strip the Ethernet CRC bytes before the packet is posted to the 1478 * host memory. 
1479 */ 1480 rlan_ctx.crcstrip = 1; 1481 1482 rlan_ctx.l2tsel = 1; 1483 1484 /* don't do header splitting */ 1485 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; 1486 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; 1487 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; 1488 1489 /* strip VLAN from inner headers */ 1490 rlan_ctx.showiv = 1; 1491 1492 rlan_ctx.rxmax = min(vsi->max_frame_size, 1493 ICE_MAX_RX_SEGS * vsi->mbuf_sz); 1494 1495 rlan_ctx.lrxqthresh = 1; 1496 1497 if (vsi->type != ICE_VSI_VF) { 1498 regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); 1499 regval &= ~QRXFLXP_CNTXT_RXDID_IDX_M; 1500 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 1501 QRXFLXP_CNTXT_RXDID_IDX_M; 1502 1503 regval &= ~QRXFLXP_CNTXT_RXDID_PRIO_M; 1504 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 1505 QRXFLXP_CNTXT_RXDID_PRIO_M; 1506 1507 wr32(hw, QRXFLXP_CNTXT(pf_q), regval); 1508 } 1509 1510 status = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); 1511 if (status) { 1512 device_printf(sc->dev, 1513 "Failed to set LAN Rx queue context, err %s aq_err %s\n", 1514 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 1515 return (EIO); 1516 } 1517 1518 wr32(hw, rxq->tail, 0); 1519 1520 return 0; 1521 } 1522 1523 /** 1524 * ice_cfg_vsi_for_rx - Configure the hardware for Rx 1525 * @vsi: the VSI to configure 1526 * 1527 * Prepare an Rx context descriptor and configure the device to receive 1528 * traffic. 1529 * 1530 * @pre the VSI must have initialized mbuf_sz 1531 */ 1532 int 1533 ice_cfg_vsi_for_rx(struct ice_vsi *vsi) 1534 { 1535 int i, err; 1536 1537 for (i = 0; i < vsi->num_rx_queues; i++) { 1538 MPASS(vsi->mbuf_sz > 0); 1539 err = ice_setup_rx_ctx(&vsi->rx_queues[i]); 1540 if (err) 1541 return err; 1542 } 1543 1544 return (0); 1545 } 1546 1547 /** 1548 * ice_is_rxq_ready - Check if an Rx queue is ready 1549 * @hw: ice hw structure 1550 * @pf_q: absolute PF queue index to check 1551 * @reg: on successful return, contains qrx_ctrl contents 1552 * 1553 * Reads the QRX_CTRL register and verifies if the queue is in a consistent 1554 * state. That is, QENA_REQ matches QENA_STAT. Used to check before making 1555 * a request to change the queue, as well as to verify the request has 1556 * finished. The queue should change status within a few microseconds, so we 1557 * use a small delay while polling the register. 1558 * 1559 * Returns an error code if the queue does not update after a few retries. 1560 */ 1561 static int 1562 ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg) 1563 { 1564 u32 qrx_ctrl, qena_req, qena_stat; 1565 int i; 1566 1567 for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { 1568 qrx_ctrl = rd32(hw, QRX_CTRL(pf_q)); 1569 qena_req = (qrx_ctrl >> QRX_CTRL_QENA_REQ_S) & 1; 1570 qena_stat = (qrx_ctrl >> QRX_CTRL_QENA_STAT_S) & 1; 1571 1572 /* if the request and status bits equal, then the queue is 1573 * fully disabled or enabled. 1574 */ 1575 if (qena_req == qena_stat) { 1576 *reg = qrx_ctrl; 1577 return (0); 1578 } 1579 1580 /* wait a few microseconds before we check again */ 1581 DELAY(10); 1582 } 1583 1584 return (ETIMEDOUT); 1585 } 1586 1587 /** 1588 * ice_control_rx_queues - Configure hardware to start or stop the Rx queues 1589 * @vsi: VSI to enable/disable queues 1590 * @enable: true to enable queues, false to disable 1591 * 1592 * Control the Rx queues through the QRX_CTRL register, enabling or disabling 1593 * them. Wait for the appropriate time to ensure that the queues have actually 1594 * reached the expected state. 
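 *
 * Note: when disabling queues, ice_flush_rxq_interrupts is expected to have
 * been called first so the queues are dissociated from their interrupt cause
 * and the disable request can complete.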
1595 */ 1596 int 1597 ice_control_rx_queues(struct ice_vsi *vsi, bool enable) 1598 { 1599 struct ice_hw *hw = &vsi->sc->hw; 1600 device_t dev = vsi->sc->dev; 1601 u32 qrx_ctrl = 0; 1602 int i, err; 1603 1604 /* TODO: amortize waits by changing all queues up front and then 1605 * checking their status afterwards. This will become more necessary 1606 * when we have a large number of queues. 1607 */ 1608 for (i = 0; i < vsi->num_rx_queues; i++) { 1609 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1610 int pf_q = vsi->rx_qmap[rxq->me]; 1611 1612 err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); 1613 if (err) { 1614 device_printf(dev, 1615 "Rx queue %d is not ready\n", 1616 pf_q); 1617 return err; 1618 } 1619 1620 /* Skip if the queue is already in correct state */ 1621 if (enable == !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) 1622 continue; 1623 1624 if (enable) 1625 qrx_ctrl |= QRX_CTRL_QENA_REQ_M; 1626 else 1627 qrx_ctrl &= ~QRX_CTRL_QENA_REQ_M; 1628 wr32(hw, QRX_CTRL(pf_q), qrx_ctrl); 1629 1630 /* wait for the queue to finalize the request */ 1631 err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); 1632 if (err) { 1633 device_printf(dev, 1634 "Rx queue %d %sable timeout\n", 1635 pf_q, (enable ? "en" : "dis")); 1636 return err; 1637 } 1638 1639 /* this should never happen */ 1640 if (enable != !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) { 1641 device_printf(dev, 1642 "Rx queue %d invalid state\n", 1643 pf_q); 1644 return (EDOOFUS); 1645 } 1646 } 1647 1648 return (0); 1649 } 1650 1651 /** 1652 * ice_add_mac_to_list - Add MAC filter to a MAC filter list 1653 * @vsi: the VSI to forward to 1654 * @list: list which contains MAC filter entries 1655 * @addr: the MAC address to be added 1656 * @action: filter action to perform on match 1657 * 1658 * Adds a MAC address filter to the list which will be forwarded to firmware 1659 * to add a series of MAC address filters. 1660 * 1661 * Returns 0 on success, and an error code on failure. 1662 * 1663 */ 1664 static int 1665 ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 1666 const u8 *addr, enum ice_sw_fwd_act_type action) 1667 { 1668 struct ice_fltr_list_entry *entry; 1669 1670 entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); 1671 if (!entry) 1672 return (ENOMEM); 1673 1674 entry->fltr_info.flag = ICE_FLTR_TX; 1675 entry->fltr_info.src_id = ICE_SRC_ID_VSI; 1676 entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 1677 entry->fltr_info.fltr_act = action; 1678 entry->fltr_info.vsi_handle = vsi->idx; 1679 bcopy(addr, entry->fltr_info.l_data.mac.mac_addr, ETHER_ADDR_LEN); 1680 1681 LIST_ADD(&entry->list_entry, list); 1682 1683 return 0; 1684 } 1685 1686 /** 1687 * ice_free_fltr_list - Free memory associated with a MAC address list 1688 * @list: the list to free 1689 * 1690 * Free the memory of each entry associated with the list. 1691 */ 1692 static void 1693 ice_free_fltr_list(struct ice_list_head *list) 1694 { 1695 struct ice_fltr_list_entry *e, *tmp; 1696 1697 LIST_FOR_EACH_ENTRY_SAFE(e, tmp, list, ice_fltr_list_entry, list_entry) { 1698 LIST_DEL(&e->list_entry); 1699 free(e, M_ICE); 1700 } 1701 } 1702 1703 /** 1704 * ice_add_vsi_mac_filter - Add a MAC address filter for a VSI 1705 * @vsi: the VSI to add the filter for 1706 * @addr: MAC address to add a filter for 1707 * 1708 * Add a MAC address filter for a given VSI. This is a wrapper around 1709 * ice_add_mac to simplify the interface. First, it only accepts a single 1710 * address, so we don't have to mess around with the list setup in other 1711 * functions. 
Second, it ignores the ICE_ERR_ALREADY_EXIST error, so that 1712 * callers don't need to worry about attempting to add the same filter twice. 1713 */ 1714 int 1715 ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) 1716 { 1717 struct ice_list_head mac_addr_list; 1718 struct ice_hw *hw = &vsi->sc->hw; 1719 device_t dev = vsi->sc->dev; 1720 enum ice_status status; 1721 int err = 0; 1722 1723 INIT_LIST_HEAD(&mac_addr_list); 1724 1725 err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); 1726 if (err) 1727 goto free_mac_list; 1728 1729 status = ice_add_mac(hw, &mac_addr_list); 1730 if (status == ICE_ERR_ALREADY_EXISTS) { 1731 ; /* Don't complain if we try to add a filter that already exists */ 1732 } else if (status) { 1733 device_printf(dev, 1734 "Failed to add a filter for MAC %6D, err %s aq_err %s\n", 1735 addr, ":", 1736 ice_status_str(status), 1737 ice_aq_str(hw->adminq.sq_last_status)); 1738 err = (EIO); 1739 } 1740 1741 free_mac_list: 1742 ice_free_fltr_list(&mac_addr_list); 1743 return err; 1744 } 1745 1746 /** 1747 * ice_cfg_pf_default_mac_filters - Setup default unicast and broadcast addrs 1748 * @sc: device softc structure 1749 * 1750 * Program the default unicast and broadcast filters for the PF VSI. 1751 */ 1752 int 1753 ice_cfg_pf_default_mac_filters(struct ice_softc *sc) 1754 { 1755 struct ice_vsi *vsi = &sc->pf_vsi; 1756 struct ice_hw *hw = &sc->hw; 1757 int err; 1758 1759 /* Add the LAN MAC address */ 1760 err = ice_add_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); 1761 if (err) 1762 return err; 1763 1764 /* Add the broadcast address */ 1765 err = ice_add_vsi_mac_filter(vsi, broadcastaddr); 1766 if (err) 1767 return err; 1768 1769 return (0); 1770 } 1771 1772 /** 1773 * ice_remove_vsi_mac_filter - Remove a MAC address filter for a VSI 1774 * @vsi: the VSI to add the filter for 1775 * @addr: MAC address to remove a filter for 1776 * 1777 * Remove a MAC address filter from a given VSI. This is a wrapper around 1778 * ice_remove_mac to simplify the interface. First, it only accepts a single 1779 * address, so we don't have to mess around with the list setup in other 1780 * functions. Second, it ignores the ICE_ERR_DOES_NOT_EXIST error, so that 1781 * callers don't need to worry about attempting to remove filters which 1782 * haven't yet been added. 1783 */ 1784 int 1785 ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) 1786 { 1787 struct ice_list_head mac_addr_list; 1788 struct ice_hw *hw = &vsi->sc->hw; 1789 device_t dev = vsi->sc->dev; 1790 enum ice_status status; 1791 int err = 0; 1792 1793 INIT_LIST_HEAD(&mac_addr_list); 1794 1795 err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); 1796 if (err) 1797 goto free_mac_list; 1798 1799 status = ice_remove_mac(hw, &mac_addr_list); 1800 if (status == ICE_ERR_DOES_NOT_EXIST) { 1801 ; /* Don't complain if we try to remove a filter that doesn't exist */ 1802 } else if (status) { 1803 device_printf(dev, 1804 "Failed to remove a filter for MAC %6D, err %s aq_err %s\n", 1805 addr, ":", 1806 ice_status_str(status), 1807 ice_aq_str(hw->adminq.sq_last_status)); 1808 err = (EIO); 1809 } 1810 1811 free_mac_list: 1812 ice_free_fltr_list(&mac_addr_list); 1813 return err; 1814 } 1815 1816 /** 1817 * ice_rm_pf_default_mac_filters - Remove default unicast and broadcast addrs 1818 * @sc: device softc structure 1819 * 1820 * Remove the default unicast and broadcast filters from the PF VSI. 
1821 */ 1822 int 1823 ice_rm_pf_default_mac_filters(struct ice_softc *sc) 1824 { 1825 struct ice_vsi *vsi = &sc->pf_vsi; 1826 struct ice_hw *hw = &sc->hw; 1827 int err; 1828 1829 /* Remove the LAN MAC address */ 1830 err = ice_remove_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); 1831 if (err) 1832 return err; 1833 1834 /* Remove the broadcast address */ 1835 err = ice_remove_vsi_mac_filter(vsi, broadcastaddr); 1836 if (err) 1837 return (err); 1838 1839 return (0); 1840 } 1841 1842 /** 1843 * ice_check_ctrlq_errors - Check for and report controlq errors 1844 * @sc: device private structure 1845 * @qname: name of the controlq 1846 * @cq: the controlq to check 1847 * 1848 * Check and report controlq errors. Currently all we do is report them to the 1849 * kernel message log, but we might want to improve this in the future, such 1850 * as to keep track of statistics. 1851 */ 1852 static void 1853 ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, 1854 struct ice_ctl_q_info *cq) 1855 { 1856 struct ice_hw *hw = &sc->hw; 1857 u32 val; 1858 1859 /* Check for error indications. Note that all the controlqs use the 1860 * same register layout, so we use the PF_FW_AxQLEN defines only. 1861 */ 1862 val = rd32(hw, cq->rq.len); 1863 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 1864 PF_FW_ARQLEN_ARQCRIT_M)) { 1865 if (val & PF_FW_ARQLEN_ARQVFE_M) 1866 device_printf(sc->dev, 1867 "%s Receive Queue VF Error detected\n", qname); 1868 if (val & PF_FW_ARQLEN_ARQOVFL_M) 1869 device_printf(sc->dev, 1870 "%s Receive Queue Overflow Error detected\n", 1871 qname); 1872 if (val & PF_FW_ARQLEN_ARQCRIT_M) 1873 device_printf(sc->dev, 1874 "%s Receive Queue Critical Error detected\n", 1875 qname); 1876 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | 1877 PF_FW_ARQLEN_ARQCRIT_M); 1878 wr32(hw, cq->rq.len, val); 1879 } 1880 1881 val = rd32(hw, cq->sq.len); 1882 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 1883 PF_FW_ATQLEN_ATQCRIT_M)) { 1884 if (val & PF_FW_ATQLEN_ATQVFE_M) 1885 device_printf(sc->dev, 1886 "%s Send Queue VF Error detected\n", qname); 1887 if (val & PF_FW_ATQLEN_ATQOVFL_M) 1888 device_printf(sc->dev, 1889 "%s Send Queue Overflow Error detected\n", 1890 qname); 1891 if (val & PF_FW_ATQLEN_ATQCRIT_M) 1892 device_printf(sc->dev, 1893 "%s Send Queue Critical Error detected\n", 1894 qname); 1895 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | 1896 PF_FW_ATQLEN_ATQCRIT_M); 1897 wr32(hw, cq->sq.len, val); 1898 } 1899 } 1900 1901 /** 1902 * ice_process_link_event - Process a link event indication from firmware 1903 * @sc: device softc structure 1904 * @e: the received event data 1905 * 1906 * Gets the current link status from hardware, and may print a message if an 1907 * unqualified module is detected. 1908 */ 1909 static void 1910 ice_process_link_event(struct ice_softc *sc, 1911 struct ice_rq_event_info __invariant_only *e) 1912 { 1913 struct ice_port_info *pi = sc->hw.port_info; 1914 struct ice_hw *hw = &sc->hw; 1915 device_t dev = sc->dev; 1916 enum ice_status status; 1917 1918 /* Sanity check that the data length matches */ 1919 MPASS(le16toh(e->desc.datalen) == sizeof(struct ice_aqc_get_link_status_data)); 1920 1921 /* 1922 * Even though the adapter gets link status information inside the 1923 * event, it needs to send a Get Link Status AQ command in order 1924 * to re-enable link events.
1925 */ 1926 pi->phy.get_link_info = true; 1927 ice_get_link_status(pi, &sc->link_up); 1928 1929 if (pi->phy.link_info.topo_media_conflict & 1930 (ICE_AQ_LINK_TOPO_CONFLICT | ICE_AQ_LINK_MEDIA_CONFLICT | 1931 ICE_AQ_LINK_TOPO_CORRUPT)) 1932 device_printf(dev, 1933 "Possible mis-configuration of the Ethernet port detected; please use the Intel (R) Ethernet Port Configuration Tool utility to address the issue.\n"); 1934 1935 if ((pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) && 1936 !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) { 1937 if (!(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE)) 1938 device_printf(dev, 1939 "Link is disabled on this device because an unsupported module type was detected! Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); 1940 if (pi->phy.link_info.link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) 1941 device_printf(dev, 1942 "The module's power requirements exceed the device's power supply. Cannot start link.\n"); 1943 if (pi->phy.link_info.link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) 1944 device_printf(dev, 1945 "The installed module is incompatible with the device's NVM image. Cannot start link.\n"); 1946 } 1947 1948 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { 1949 if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) { 1950 status = ice_aq_set_link_restart_an(pi, false, NULL); 1951 if (status != ICE_SUCCESS) 1952 device_printf(dev, 1953 "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", 1954 __func__, ice_status_str(status), 1955 ice_aq_str(hw->adminq.sq_last_status)); 1956 } 1957 } 1958 /* ICE_STATE_NO_MEDIA is cleared when polling task detects media */ 1959 1960 /* Indicate that link status must be reported again */ 1961 ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED); 1962 1963 /* OS link info is updated elsewhere */ 1964 } 1965 1966 /** 1967 * ice_process_ctrlq_event - Respond to a controlq event 1968 * @sc: device private structure 1969 * @qname: the name for this controlq 1970 * @event: the event to process 1971 * 1972 * Perform actions in response to various controlq event notifications. 1973 */ 1974 static void 1975 ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, 1976 struct ice_rq_event_info *event) 1977 { 1978 u16 opcode; 1979 1980 opcode = le16toh(event->desc.opcode); 1981 1982 switch (opcode) { 1983 case ice_aqc_opc_get_link_status: 1984 ice_process_link_event(sc, event); 1985 break; 1986 case ice_mbx_opc_send_msg_to_pf: 1987 /* TODO: handle IOV event */ 1988 break; 1989 case ice_aqc_opc_lldp_set_mib_change: 1990 ice_handle_mib_change_event(sc, event); 1991 break; 1992 case ice_aqc_opc_event_lan_overflow: 1993 ice_handle_lan_overflow_event(sc, event); 1994 break; 1995 case ice_aqc_opc_get_health_status: 1996 ice_handle_health_status_event(sc, event); 1997 break; 1998 default: 1999 device_printf(sc->dev, 2000 "%s Receive Queue unhandled event 0x%04x ignored\n", 2001 qname, opcode); 2002 } 2003 } 2004 2005 /** 2006 * ice_process_ctrlq - helper function to process controlq rings 2007 * @sc: device private structure 2008 * @q_type: specific control queue type 2009 * @pending: return parameter to track remaining events 2010 * 2011 * Process controlq events for a given control queue type. Returns zero on 2012 * success, and an error code on failure. If successful, pending is the number 2013 * of remaining events left in the queue. 
2014 */ 2015 int 2016 ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending) 2017 { 2018 struct ice_rq_event_info event = { { 0 } }; 2019 struct ice_hw *hw = &sc->hw; 2020 struct ice_ctl_q_info *cq; 2021 enum ice_status status; 2022 const char *qname; 2023 int loop = 0; 2024 2025 switch (q_type) { 2026 case ICE_CTL_Q_ADMIN: 2027 cq = &hw->adminq; 2028 qname = "Admin"; 2029 break; 2030 case ICE_CTL_Q_MAILBOX: 2031 cq = &hw->mailboxq; 2032 qname = "Mailbox"; 2033 break; 2034 default: 2035 device_printf(sc->dev, 2036 "Unknown control queue type 0x%x\n", 2037 q_type); 2038 return 0; 2039 } 2040 2041 ice_check_ctrlq_errors(sc, qname, cq); 2042 2043 /* 2044 * Control queue processing happens during the admin task which may be 2045 * holding a non-sleepable lock, so we *must* use M_NOWAIT here. 2046 */ 2047 event.buf_len = cq->rq_buf_size; 2048 event.msg_buf = (u8 *)malloc(event.buf_len, M_ICE, M_ZERO | M_NOWAIT); 2049 if (!event.msg_buf) { 2050 device_printf(sc->dev, 2051 "Unable to allocate memory for %s Receive Queue event\n", 2052 qname); 2053 return (ENOMEM); 2054 } 2055 2056 do { 2057 status = ice_clean_rq_elem(hw, cq, &event, pending); 2058 if (status == ICE_ERR_AQ_NO_WORK) 2059 break; 2060 if (status) { 2062 device_printf(sc->dev, 2063 "%s Receive Queue event error %s\n", 2064 qname, ice_status_str(status)); 2069 free(event.msg_buf, M_ICE); 2070 return (EIO); 2071 } 2072 /* XXX should we separate this handler by controlq type? */ 2073 ice_process_ctrlq_event(sc, qname, &event); 2074 } while (*pending && (++loop < ICE_CTRLQ_WORK_LIMIT)); 2075 2076 free(event.msg_buf, M_ICE); 2077 2078 return 0; 2079 } 2080 2081 /** 2082 * pkg_ver_empty - Check if a package version is empty 2083 * @pkg_ver: the package version to check 2084 * @pkg_name: the package name to check 2085 * 2086 * Checks if the package version structure is empty. We consider a package 2087 * version as empty if none of the versions are non-zero and the name string 2088 * is null as well. 2089 * 2090 * This is used to check if the package version was initialized by the driver, 2091 * as we do not expect an actual DDP package file to have a zero'd version and 2092 * name. 2093 * 2094 * @returns true if the package version is empty, or false otherwise. 2095 */ 2096 static bool 2097 pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name) 2098 { 2099 return (pkg_name[0] == '\0' && 2100 pkg_ver->major == 0 && 2101 pkg_ver->minor == 0 && 2102 pkg_ver->update == 0 && 2103 pkg_ver->draft == 0); 2104 } 2105 2106 /** 2107 * pkg_ver_compatible - Check if the package version is compatible 2108 * @pkg_ver: the package version to check 2109 * 2110 * Compares the package version number to the driver's expected major/minor 2111 * version. Returns an integer indicating whether the version is older, newer, 2112 * or compatible with the driver. 2113 * 2114 * @returns 0 if the package version is compatible, -1 if the package version 2115 * is older, and 1 if the package version is newer than the driver version.
2116 */ 2117 static int 2118 pkg_ver_compatible(struct ice_pkg_ver *pkg_ver) 2119 { 2120 if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ) 2121 return (1); /* newer */ 2122 else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) && 2123 (pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) 2124 return (1); /* newer */ 2125 else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) && 2126 (pkg_ver->minor == ICE_PKG_SUPP_VER_MNR)) 2127 return (0); /* compatible */ 2128 else 2129 return (-1); /* older */ 2130 } 2131 2132 /** 2133 * ice_os_pkg_version_str - Format OS package version info into a sbuf 2134 * @hw: device hw structure 2135 * @buf: string buffer to store name/version string 2136 * 2137 * Formats the name and version of the OS DDP package as found in the ice_ddp 2138 * module into a string. 2139 * 2140 * @remark This will almost always be the same as the active package, but 2141 * could be different in some cases. Use ice_active_pkg_version_str to get the 2142 * version of the active DDP package. 2143 */ 2144 static void 2145 ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) 2146 { 2147 char name_buf[ICE_PKG_NAME_SIZE]; 2148 2149 /* If the OS DDP package info is empty, use "None" */ 2150 if (pkg_ver_empty(&hw->pkg_ver, hw->pkg_name)) { 2151 sbuf_printf(buf, "None"); 2152 return; 2153 } 2154 2155 /* 2156 * This should already be null-terminated, but since this is a raw 2157 * value from an external source, strlcpy() into a new buffer to 2158 * make sure. 2159 */ 2160 bzero(name_buf, sizeof(name_buf)); 2161 strlcpy(name_buf, (char *)hw->pkg_name, ICE_PKG_NAME_SIZE); 2162 2163 sbuf_printf(buf, "%s version %u.%u.%u.%u", 2164 name_buf, 2165 hw->pkg_ver.major, 2166 hw->pkg_ver.minor, 2167 hw->pkg_ver.update, 2168 hw->pkg_ver.draft); 2169 } 2170 2171 /** 2172 * ice_active_pkg_version_str - Format active package version info into a sbuf 2173 * @hw: device hw structure 2174 * @buf: string buffer to store name/version string 2175 * 2176 * Formats the name and version of the active DDP package info into a string 2177 * buffer for use. 2178 */ 2179 static void 2180 ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) 2181 { 2182 char name_buf[ICE_PKG_NAME_SIZE]; 2183 2184 /* If the active DDP package info is empty, use "None" */ 2185 if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { 2186 sbuf_printf(buf, "None"); 2187 return; 2188 } 2189 2190 /* 2191 * This should already be null-terminated, but since this is a raw 2192 * value from an external source, strlcpy() into a new buffer to 2193 * make sure. 2194 */ 2195 bzero(name_buf, sizeof(name_buf)); 2196 strlcpy(name_buf, (char *)hw->active_pkg_name, ICE_PKG_NAME_SIZE); 2197 2198 sbuf_printf(buf, "%s version %u.%u.%u.%u", 2199 name_buf, 2200 hw->active_pkg_ver.major, 2201 hw->active_pkg_ver.minor, 2202 hw->active_pkg_ver.update, 2203 hw->active_pkg_ver.draft); 2204 2205 if (hw->active_track_id != 0) 2206 sbuf_printf(buf, ", track id 0x%08x", hw->active_track_id); 2207 } 2208 2209 /** 2210 * ice_nvm_version_str - Format the NVM version information into a sbuf 2211 * @hw: device hw structure 2212 * @buf: string buffer to store version string 2213 * 2214 * Formats the NVM information including firmware version, API version, NVM 2215 * version, the EETRACK id, and OEM specific version information into a string 2216 * buffer. 
2217 */ 2218 static void 2219 ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf) 2220 { 2221 struct ice_nvm_info *nvm = &hw->flash.nvm; 2222 struct ice_orom_info *orom = &hw->flash.orom; 2223 struct ice_netlist_info *netlist = &hw->flash.netlist; 2224 2225 /* Note that the netlist versions are stored in packed Binary Coded 2226 * Decimal format. The use of '%x' will correctly display these as 2227 * decimal numbers. This works because every 4 bits will be displayed 2228 * as a hexadecimal digit, and the BCD format will only use the values 2229 * 0-9. 2230 */ 2231 sbuf_printf(buf, 2232 "fw %u.%u.%u api %u.%u nvm %x.%02x etid %08x netlist %x.%x.%x-%x.%x.%x.%04x oem %u.%u.%u", 2233 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, 2234 hw->api_maj_ver, hw->api_min_ver, 2235 nvm->major, nvm->minor, nvm->eetrack, 2236 netlist->major, netlist->minor, 2237 netlist->type >> 16, netlist->type & 0xFFFF, 2238 netlist->rev, netlist->cust_ver, netlist->hash, 2239 orom->major, orom->build, orom->patch); 2240 } 2241 2242 /** 2243 * ice_print_nvm_version - Print the NVM info to the kernel message log 2244 * @sc: the device softc structure 2245 * 2246 * Format and print an NVM version string using ice_nvm_version_str(). 2247 */ 2248 void 2249 ice_print_nvm_version(struct ice_softc *sc) 2250 { 2251 struct ice_hw *hw = &sc->hw; 2252 device_t dev = sc->dev; 2253 struct sbuf *sbuf; 2254 2255 sbuf = sbuf_new_auto(); 2256 ice_nvm_version_str(hw, sbuf); 2257 sbuf_finish(sbuf); 2258 device_printf(dev, "%s\n", sbuf_data(sbuf)); 2259 sbuf_delete(sbuf); 2260 } 2261 2262 /** 2263 * ice_update_vsi_hw_stats - Update VSI-specific ethernet statistics counters 2264 * @vsi: the VSI to be updated 2265 * 2266 * Reads hardware stats and updates the ice_vsi_hw_stats tracking structure with 2267 * the updated values. 2268 */ 2269 void 2270 ice_update_vsi_hw_stats(struct ice_vsi *vsi) 2271 { 2272 struct ice_eth_stats *prev_es, *cur_es; 2273 struct ice_hw *hw = &vsi->sc->hw; 2274 u16 vsi_num; 2275 2276 if (!ice_is_vsi_valid(hw, vsi->idx)) 2277 return; 2278 2279 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); /* HW absolute index of a VSI */ 2280 prev_es = &vsi->hw_stats.prev; 2281 cur_es = &vsi->hw_stats.cur; 2282 2283 #define ICE_VSI_STAT40(name, location) \ 2284 ice_stat_update40(hw, name ## L(vsi_num), \ 2285 vsi->hw_stats.offsets_loaded, \ 2286 &prev_es->location, &cur_es->location) 2287 2288 #define ICE_VSI_STAT32(name, location) \ 2289 ice_stat_update32(hw, name(vsi_num), \ 2290 vsi->hw_stats.offsets_loaded, \ 2291 &prev_es->location, &cur_es->location) 2292 2293 ICE_VSI_STAT40(GLV_GORC, rx_bytes); 2294 ICE_VSI_STAT40(GLV_UPRC, rx_unicast); 2295 ICE_VSI_STAT40(GLV_MPRC, rx_multicast); 2296 ICE_VSI_STAT40(GLV_BPRC, rx_broadcast); 2297 ICE_VSI_STAT32(GLV_RDPC, rx_discards); 2298 ICE_VSI_STAT40(GLV_GOTC, tx_bytes); 2299 ICE_VSI_STAT40(GLV_UPTC, tx_unicast); 2300 ICE_VSI_STAT40(GLV_MPTC, tx_multicast); 2301 ICE_VSI_STAT40(GLV_BPTC, tx_broadcast); 2302 ICE_VSI_STAT32(GLV_TEPC, tx_errors); 2303 2304 ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded, 2305 cur_es); 2306 2307 #undef ICE_VSI_STAT40 2308 #undef ICE_VSI_STAT32 2309 2310 vsi->hw_stats.offsets_loaded = true; 2311 } 2312 2313 /** 2314 * ice_reset_vsi_stats - Reset VSI statistics counters 2315 * @vsi: VSI structure 2316 * 2317 * Resets the software tracking counters for the VSI statistics, and indicate 2318 * that the offsets haven't been loaded. This is intended to be called 2319 * post-reset so that VSI statistics count from zero again. 
2320 */ 2321 void 2322 ice_reset_vsi_stats(struct ice_vsi *vsi) 2323 { 2324 /* Reset HW stats */ 2325 memset(&vsi->hw_stats.prev, 0, sizeof(vsi->hw_stats.prev)); 2326 memset(&vsi->hw_stats.cur, 0, sizeof(vsi->hw_stats.cur)); 2327 vsi->hw_stats.offsets_loaded = false; 2328 } 2329 2330 /** 2331 * ice_update_pf_stats - Update port stats counters 2332 * @sc: device private softc structure 2333 * 2334 * Reads hardware statistics registers and updates the software tracking 2335 * structure with new values. 2336 */ 2337 void 2338 ice_update_pf_stats(struct ice_softc *sc) 2339 { 2340 struct ice_hw_port_stats *prev_ps, *cur_ps; 2341 struct ice_hw *hw = &sc->hw; 2342 u8 lport; 2343 2344 MPASS(hw->port_info); 2345 2346 prev_ps = &sc->stats.prev; 2347 cur_ps = &sc->stats.cur; 2348 lport = hw->port_info->lport; 2349 2350 #define ICE_PF_STAT40(name, location) \ 2351 ice_stat_update40(hw, name ## L(lport), \ 2352 sc->stats.offsets_loaded, \ 2353 &prev_ps->location, &cur_ps->location) 2354 2355 #define ICE_PF_STAT32(name, location) \ 2356 ice_stat_update32(hw, name(lport), \ 2357 sc->stats.offsets_loaded, \ 2358 &prev_ps->location, &cur_ps->location) 2359 2360 ICE_PF_STAT40(GLPRT_GORC, eth.rx_bytes); 2361 ICE_PF_STAT40(GLPRT_UPRC, eth.rx_unicast); 2362 ICE_PF_STAT40(GLPRT_MPRC, eth.rx_multicast); 2363 ICE_PF_STAT40(GLPRT_BPRC, eth.rx_broadcast); 2364 ICE_PF_STAT40(GLPRT_GOTC, eth.tx_bytes); 2365 ICE_PF_STAT40(GLPRT_UPTC, eth.tx_unicast); 2366 ICE_PF_STAT40(GLPRT_MPTC, eth.tx_multicast); 2367 ICE_PF_STAT40(GLPRT_BPTC, eth.tx_broadcast); 2368 /* This stat register doesn't have an lport */ 2369 ice_stat_update32(hw, PRTRPB_RDPC, 2370 sc->stats.offsets_loaded, 2371 &prev_ps->eth.rx_discards, &cur_ps->eth.rx_discards); 2372 2373 ICE_PF_STAT32(GLPRT_TDOLD, tx_dropped_link_down); 2374 ICE_PF_STAT40(GLPRT_PRC64, rx_size_64); 2375 ICE_PF_STAT40(GLPRT_PRC127, rx_size_127); 2376 ICE_PF_STAT40(GLPRT_PRC255, rx_size_255); 2377 ICE_PF_STAT40(GLPRT_PRC511, rx_size_511); 2378 ICE_PF_STAT40(GLPRT_PRC1023, rx_size_1023); 2379 ICE_PF_STAT40(GLPRT_PRC1522, rx_size_1522); 2380 ICE_PF_STAT40(GLPRT_PRC9522, rx_size_big); 2381 ICE_PF_STAT40(GLPRT_PTC64, tx_size_64); 2382 ICE_PF_STAT40(GLPRT_PTC127, tx_size_127); 2383 ICE_PF_STAT40(GLPRT_PTC255, tx_size_255); 2384 ICE_PF_STAT40(GLPRT_PTC511, tx_size_511); 2385 ICE_PF_STAT40(GLPRT_PTC1023, tx_size_1023); 2386 ICE_PF_STAT40(GLPRT_PTC1522, tx_size_1522); 2387 ICE_PF_STAT40(GLPRT_PTC9522, tx_size_big); 2388 2389 ICE_PF_STAT32(GLPRT_LXONRXC, link_xon_rx); 2390 ICE_PF_STAT32(GLPRT_LXOFFRXC, link_xoff_rx); 2391 ICE_PF_STAT32(GLPRT_LXONTXC, link_xon_tx); 2392 ICE_PF_STAT32(GLPRT_LXOFFTXC, link_xoff_tx); 2393 ICE_PF_STAT32(GLPRT_CRCERRS, crc_errors); 2394 ICE_PF_STAT32(GLPRT_ILLERRC, illegal_bytes); 2395 ICE_PF_STAT32(GLPRT_MLFC, mac_local_faults); 2396 ICE_PF_STAT32(GLPRT_MRFC, mac_remote_faults); 2397 ICE_PF_STAT32(GLPRT_RLEC, rx_len_errors); 2398 ICE_PF_STAT32(GLPRT_RUC, rx_undersize); 2399 ICE_PF_STAT32(GLPRT_RFC, rx_fragments); 2400 ICE_PF_STAT32(GLPRT_ROC, rx_oversize); 2401 ICE_PF_STAT32(GLPRT_RJC, rx_jabber); 2402 2403 #undef ICE_PF_STAT40 2404 #undef ICE_PF_STAT32 2405 2406 sc->stats.offsets_loaded = true; 2407 } 2408 2409 /** 2410 * ice_reset_pf_stats - Reset port stats counters 2411 * @sc: Device private softc structure 2412 * 2413 * Reset software tracking values for statistics to zero, and indicate that 2414 * offsets haven't been loaded. Intended to be called after a device reset so 2415 * that statistics count from zero again. 
2416 */ 2417 void 2418 ice_reset_pf_stats(struct ice_softc *sc) 2419 { 2420 memset(&sc->stats.prev, 0, sizeof(sc->stats.prev)); 2421 memset(&sc->stats.cur, 0, sizeof(sc->stats.cur)); 2422 sc->stats.offsets_loaded = false; 2423 } 2424 2425 /** 2426 * ice_sysctl_show_fw - sysctl callback to show firmware information 2427 * @oidp: sysctl oid structure 2428 * @arg1: pointer to private data structure 2429 * @arg2: unused 2430 * @req: sysctl request pointer 2431 * 2432 * Callback for the fw_version sysctl, to display the current firmware 2433 * information found at hardware init time. 2434 */ 2435 static int 2436 ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2437 { 2438 struct ice_softc *sc = (struct ice_softc *)arg1; 2439 struct ice_hw *hw = &sc->hw; 2440 struct sbuf *sbuf; 2441 2442 UNREFERENCED_PARAMETER(oidp); 2443 UNREFERENCED_PARAMETER(arg2); 2444 2445 if (ice_driver_is_detaching(sc)) 2446 return (ESHUTDOWN); 2447 2448 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2449 ice_nvm_version_str(hw, sbuf); 2450 sbuf_finish(sbuf); 2451 sbuf_delete(sbuf); 2452 2453 return (0); 2454 } 2455 2456 /** 2457 * ice_sysctl_pba_number - sysctl callback to show PBA number 2458 * @oidp: sysctl oid structure 2459 * @arg1: pointer to private data structure 2460 * @arg2: unused 2461 * @req: sysctl request pointer 2462 * 2463 * Callback for the pba_number sysctl, used to read the Product Board Assembly 2464 * number for this device. 2465 */ 2466 static int 2467 ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS) 2468 { 2469 struct ice_softc *sc = (struct ice_softc *)arg1; 2470 struct ice_hw *hw = &sc->hw; 2471 device_t dev = sc->dev; 2472 u8 pba_string[32] = ""; 2473 enum ice_status status; 2474 2475 UNREFERENCED_PARAMETER(arg2); 2476 2477 if (ice_driver_is_detaching(sc)) 2478 return (ESHUTDOWN); 2479 2480 status = ice_read_pba_string(hw, pba_string, sizeof(pba_string)); 2481 if (status) { 2482 device_printf(dev, 2483 "%s: failed to read PBA string from NVM; status %s, aq_err %s\n", 2484 __func__, ice_status_str(status), 2485 ice_aq_str(hw->adminq.sq_last_status)); 2486 return (EIO); 2487 } 2488 2489 return sysctl_handle_string(oidp, pba_string, sizeof(pba_string), req); 2490 } 2491 2492 /** 2493 * ice_sysctl_pkg_version - sysctl to show the active package version info 2494 * @oidp: sysctl oid structure 2495 * @arg1: pointer to private data structure 2496 * @arg2: unused 2497 * @req: sysctl request pointer 2498 * 2499 * Callback for the pkg_version sysctl, to display the active DDP package name 2500 * and version information. 2501 */ 2502 static int 2503 ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS) 2504 { 2505 struct ice_softc *sc = (struct ice_softc *)arg1; 2506 struct ice_hw *hw = &sc->hw; 2507 struct sbuf *sbuf; 2508 2509 UNREFERENCED_PARAMETER(oidp); 2510 UNREFERENCED_PARAMETER(arg2); 2511 2512 if (ice_driver_is_detaching(sc)) 2513 return (ESHUTDOWN); 2514 2515 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2516 ice_active_pkg_version_str(hw, sbuf); 2517 sbuf_finish(sbuf); 2518 sbuf_delete(sbuf); 2519 2520 return (0); 2521 } 2522 2523 /** 2524 * ice_sysctl_os_pkg_version - sysctl to show the OS package version info 2525 * @oidp: sysctl oid structure 2526 * @arg1: pointer to private data structure 2527 * @arg2: unused 2528 * @req: sysctl request pointer 2529 * 2530 * Callback for the pkg_version sysctl, to display the OS DDP package name and 2531 * version info found in the ice_ddp module. 
2532 */ 2533 static int 2534 ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS) 2535 { 2536 struct ice_softc *sc = (struct ice_softc *)arg1; 2537 struct ice_hw *hw = &sc->hw; 2538 struct sbuf *sbuf; 2539 2540 UNREFERENCED_PARAMETER(oidp); 2541 UNREFERENCED_PARAMETER(arg2); 2542 2543 if (ice_driver_is_detaching(sc)) 2544 return (ESHUTDOWN); 2545 2546 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2547 ice_os_pkg_version_str(hw, sbuf); 2548 sbuf_finish(sbuf); 2549 sbuf_delete(sbuf); 2550 2551 return (0); 2552 } 2553 2554 /** 2555 * ice_sysctl_current_speed - sysctl callback to show current link speed 2556 * @oidp: sysctl oid structure 2557 * @arg1: pointer to private data structure 2558 * @arg2: unused 2559 * @req: sysctl request pointer 2560 * 2561 * Callback for the current_speed sysctl, to display the string representing 2562 * the current link speed. 2563 */ 2564 static int 2565 ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2566 { 2567 struct ice_softc *sc = (struct ice_softc *)arg1; 2568 struct ice_hw *hw = &sc->hw; 2569 struct sbuf *sbuf; 2570 2571 UNREFERENCED_PARAMETER(oidp); 2572 UNREFERENCED_PARAMETER(arg2); 2573 2574 if (ice_driver_is_detaching(sc)) 2575 return (ESHUTDOWN); 2576 2577 sbuf = sbuf_new_for_sysctl(NULL, NULL, 10, req); 2578 sbuf_printf(sbuf, "%s", ice_aq_speed_to_str(hw->port_info)); 2579 sbuf_finish(sbuf); 2580 sbuf_delete(sbuf); 2581 2582 return (0); 2583 } 2584 2585 /** 2586 * @var phy_link_speeds 2587 * @brief PHY link speed conversion array 2588 * 2589 * Array of link speeds to convert ICE_PHY_TYPE_LOW and ICE_PHY_TYPE_HIGH into 2590 * link speeds used by the link speed sysctls. 2591 * 2592 * @remark these are based on the indices used in the BIT() macros for the 2593 * ICE_PHY_TYPE_LOW_* and ICE_PHY_TYPE_HIGH_* definitions. 
2594 */ 2595 static const uint16_t phy_link_speeds[] = { 2596 ICE_AQ_LINK_SPEED_100MB, 2597 ICE_AQ_LINK_SPEED_100MB, 2598 ICE_AQ_LINK_SPEED_1000MB, 2599 ICE_AQ_LINK_SPEED_1000MB, 2600 ICE_AQ_LINK_SPEED_1000MB, 2601 ICE_AQ_LINK_SPEED_1000MB, 2602 ICE_AQ_LINK_SPEED_1000MB, 2603 ICE_AQ_LINK_SPEED_2500MB, 2604 ICE_AQ_LINK_SPEED_2500MB, 2605 ICE_AQ_LINK_SPEED_2500MB, 2606 ICE_AQ_LINK_SPEED_5GB, 2607 ICE_AQ_LINK_SPEED_5GB, 2608 ICE_AQ_LINK_SPEED_10GB, 2609 ICE_AQ_LINK_SPEED_10GB, 2610 ICE_AQ_LINK_SPEED_10GB, 2611 ICE_AQ_LINK_SPEED_10GB, 2612 ICE_AQ_LINK_SPEED_10GB, 2613 ICE_AQ_LINK_SPEED_10GB, 2614 ICE_AQ_LINK_SPEED_10GB, 2615 ICE_AQ_LINK_SPEED_25GB, 2616 ICE_AQ_LINK_SPEED_25GB, 2617 ICE_AQ_LINK_SPEED_25GB, 2618 ICE_AQ_LINK_SPEED_25GB, 2619 ICE_AQ_LINK_SPEED_25GB, 2620 ICE_AQ_LINK_SPEED_25GB, 2621 ICE_AQ_LINK_SPEED_25GB, 2622 ICE_AQ_LINK_SPEED_25GB, 2623 ICE_AQ_LINK_SPEED_25GB, 2624 ICE_AQ_LINK_SPEED_25GB, 2625 ICE_AQ_LINK_SPEED_25GB, 2626 ICE_AQ_LINK_SPEED_40GB, 2627 ICE_AQ_LINK_SPEED_40GB, 2628 ICE_AQ_LINK_SPEED_40GB, 2629 ICE_AQ_LINK_SPEED_40GB, 2630 ICE_AQ_LINK_SPEED_40GB, 2631 ICE_AQ_LINK_SPEED_40GB, 2632 ICE_AQ_LINK_SPEED_50GB, 2633 ICE_AQ_LINK_SPEED_50GB, 2634 ICE_AQ_LINK_SPEED_50GB, 2635 ICE_AQ_LINK_SPEED_50GB, 2636 ICE_AQ_LINK_SPEED_50GB, 2637 ICE_AQ_LINK_SPEED_50GB, 2638 ICE_AQ_LINK_SPEED_50GB, 2639 ICE_AQ_LINK_SPEED_50GB, 2640 ICE_AQ_LINK_SPEED_50GB, 2641 ICE_AQ_LINK_SPEED_50GB, 2642 ICE_AQ_LINK_SPEED_50GB, 2643 ICE_AQ_LINK_SPEED_50GB, 2644 ICE_AQ_LINK_SPEED_50GB, 2645 ICE_AQ_LINK_SPEED_50GB, 2646 ICE_AQ_LINK_SPEED_50GB, 2647 ICE_AQ_LINK_SPEED_100GB, 2648 ICE_AQ_LINK_SPEED_100GB, 2649 ICE_AQ_LINK_SPEED_100GB, 2650 ICE_AQ_LINK_SPEED_100GB, 2651 ICE_AQ_LINK_SPEED_100GB, 2652 ICE_AQ_LINK_SPEED_100GB, 2653 ICE_AQ_LINK_SPEED_100GB, 2654 ICE_AQ_LINK_SPEED_100GB, 2655 ICE_AQ_LINK_SPEED_100GB, 2656 ICE_AQ_LINK_SPEED_100GB, 2657 ICE_AQ_LINK_SPEED_100GB, 2658 ICE_AQ_LINK_SPEED_100GB, 2659 ICE_AQ_LINK_SPEED_100GB, 2660 /* These rates are for ICE_PHY_TYPE_HIGH_* */ 2661 ICE_AQ_LINK_SPEED_100GB, 2662 ICE_AQ_LINK_SPEED_100GB, 2663 ICE_AQ_LINK_SPEED_100GB, 2664 ICE_AQ_LINK_SPEED_100GB, 2665 ICE_AQ_LINK_SPEED_100GB 2666 }; 2667 2668 #define ICE_SYSCTL_HELP_ADVERTISE_SPEED \ 2669 "\nControl advertised link speed." \ 2670 "\nFlags:" \ 2671 "\n\t 0x0 - Auto" \ 2672 "\n\t 0x1 - 10 Mb" \ 2673 "\n\t 0x2 - 100 Mb" \ 2674 "\n\t 0x4 - 1G" \ 2675 "\n\t 0x8 - 2.5G" \ 2676 "\n\t 0x10 - 5G" \ 2677 "\n\t 0x20 - 10G" \ 2678 "\n\t 0x40 - 20G" \ 2679 "\n\t 0x80 - 25G" \ 2680 "\n\t 0x100 - 40G" \ 2681 "\n\t 0x200 - 50G" \ 2682 "\n\t 0x400 - 100G" \ 2683 "\n\t0x8000 - Unknown" \ 2684 "\n\t" \ 2685 "\nUse \"sysctl -x\" to view flags properly." 
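/*
 * Illustrative example (not part of the driver): the advertise_speed sysctl
 * handled below takes a bitmask built from the flags listed in
 * ICE_SYSCTL_HELP_ADVERTISE_SPEED. Assuming the first ice(4) device is
 * attached as unit 0, an administrator could restrict advertisement to 10G
 * and 25G (0x20 | 0x80) with:
 *
 *	# sysctl dev.ice.0.advertise_speed=0xa0
 *
 * and read the current flags back in hexadecimal with:
 *
 *	# sysctl -x dev.ice.0.advertise_speed
 *
 * Writing 0 restores automatic speed selection.
 */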
2686 2687 #define ICE_PHYS_100MB \ 2688 (ICE_PHY_TYPE_LOW_100BASE_TX | \ 2689 ICE_PHY_TYPE_LOW_100M_SGMII) 2690 #define ICE_PHYS_1000MB \ 2691 (ICE_PHY_TYPE_LOW_1000BASE_T | \ 2692 ICE_PHY_TYPE_LOW_1000BASE_SX | \ 2693 ICE_PHY_TYPE_LOW_1000BASE_LX | \ 2694 ICE_PHY_TYPE_LOW_1000BASE_KX | \ 2695 ICE_PHY_TYPE_LOW_1G_SGMII) 2696 #define ICE_PHYS_2500MB \ 2697 (ICE_PHY_TYPE_LOW_2500BASE_T | \ 2698 ICE_PHY_TYPE_LOW_2500BASE_X | \ 2699 ICE_PHY_TYPE_LOW_2500BASE_KX) 2700 #define ICE_PHYS_5GB \ 2701 (ICE_PHY_TYPE_LOW_5GBASE_T | \ 2702 ICE_PHY_TYPE_LOW_5GBASE_KR) 2703 #define ICE_PHYS_10GB \ 2704 (ICE_PHY_TYPE_LOW_10GBASE_T | \ 2705 ICE_PHY_TYPE_LOW_10G_SFI_DA | \ 2706 ICE_PHY_TYPE_LOW_10GBASE_SR | \ 2707 ICE_PHY_TYPE_LOW_10GBASE_LR | \ 2708 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \ 2709 ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \ 2710 ICE_PHY_TYPE_LOW_10G_SFI_C2C) 2711 #define ICE_PHYS_25GB \ 2712 (ICE_PHY_TYPE_LOW_25GBASE_T | \ 2713 ICE_PHY_TYPE_LOW_25GBASE_CR | \ 2714 ICE_PHY_TYPE_LOW_25GBASE_CR_S | \ 2715 ICE_PHY_TYPE_LOW_25GBASE_CR1 | \ 2716 ICE_PHY_TYPE_LOW_25GBASE_SR | \ 2717 ICE_PHY_TYPE_LOW_25GBASE_LR | \ 2718 ICE_PHY_TYPE_LOW_25GBASE_KR | \ 2719 ICE_PHY_TYPE_LOW_25GBASE_KR_S | \ 2720 ICE_PHY_TYPE_LOW_25GBASE_KR1 | \ 2721 ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | \ 2722 ICE_PHY_TYPE_LOW_25G_AUI_C2C) 2723 #define ICE_PHYS_40GB \ 2724 (ICE_PHY_TYPE_LOW_40GBASE_CR4 | \ 2725 ICE_PHY_TYPE_LOW_40GBASE_SR4 | \ 2726 ICE_PHY_TYPE_LOW_40GBASE_LR4 | \ 2727 ICE_PHY_TYPE_LOW_40GBASE_KR4 | \ 2728 ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \ 2729 ICE_PHY_TYPE_LOW_40G_XLAUI) 2730 #define ICE_PHYS_50GB \ 2731 (ICE_PHY_TYPE_LOW_50GBASE_CR2 | \ 2732 ICE_PHY_TYPE_LOW_50GBASE_SR2 | \ 2733 ICE_PHY_TYPE_LOW_50GBASE_LR2 | \ 2734 ICE_PHY_TYPE_LOW_50GBASE_KR2 | \ 2735 ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \ 2736 ICE_PHY_TYPE_LOW_50G_LAUI2 | \ 2737 ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \ 2738 ICE_PHY_TYPE_LOW_50G_AUI2 | \ 2739 ICE_PHY_TYPE_LOW_50GBASE_CP | \ 2740 ICE_PHY_TYPE_LOW_50GBASE_SR | \ 2741 ICE_PHY_TYPE_LOW_50GBASE_FR | \ 2742 ICE_PHY_TYPE_LOW_50GBASE_LR | \ 2743 ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 | \ 2744 ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \ 2745 ICE_PHY_TYPE_LOW_50G_AUI1) 2746 #define ICE_PHYS_100GB_LOW \ 2747 (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \ 2748 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \ 2749 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \ 2750 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \ 2751 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \ 2752 ICE_PHY_TYPE_LOW_100G_CAUI4 | \ 2753 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \ 2754 ICE_PHY_TYPE_LOW_100G_AUI4 | \ 2755 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \ 2756 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \ 2757 ICE_PHY_TYPE_LOW_100GBASE_CP2 | \ 2758 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \ 2759 ICE_PHY_TYPE_LOW_100GBASE_DR) 2760 #define ICE_PHYS_100GB_HIGH \ 2761 (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \ 2762 ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \ 2763 ICE_PHY_TYPE_HIGH_100G_CAUI2 | \ 2764 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ 2765 ICE_PHY_TYPE_HIGH_100G_AUI2) 2766 2767 /** 2768 * ice_aq_phy_types_to_link_speeds - Convert the PHY Types to speeds 2769 * @phy_type_low: lower 64-bit PHY Type bitmask 2770 * @phy_type_high: upper 64-bit PHY Type bitmask 2771 * 2772 * Convert the PHY Type fields from Get PHY Abilities and Set PHY Config into 2773 * link speed flags. If phy_type_high has an unknown PHY type, then the return 2774 * value will include the "ICE_AQ_LINK_SPEED_UNKNOWN" flag as well. 
2775 */ 2776 static u16 2777 ice_aq_phy_types_to_link_speeds(u64 phy_type_low, u64 phy_type_high) 2778 { 2779 u16 sysctl_speeds = 0; 2780 int bit; 2781 2782 /* coverity[address_of] */ 2783 for_each_set_bit(bit, &phy_type_low, 64) 2784 sysctl_speeds |= phy_link_speeds[bit]; 2785 2786 /* coverity[address_of] */ 2787 for_each_set_bit(bit, &phy_type_high, 64) { 2788 if ((bit + 64) < (int)ARRAY_SIZE(phy_link_speeds)) 2789 sysctl_speeds |= phy_link_speeds[bit + 64]; 2790 else 2791 sysctl_speeds |= ICE_AQ_LINK_SPEED_UNKNOWN; 2792 } 2793 2794 return (sysctl_speeds); 2795 } 2796 2797 /** 2798 * ice_sysctl_speeds_to_aq_phy_types - Convert sysctl speed flags to AQ PHY flags 2799 * @sysctl_speeds: 16-bit sysctl speeds or AQ_LINK_SPEED flags 2800 * @phy_type_low: output parameter for lower AQ PHY flags 2801 * @phy_type_high: output parameter for higher AQ PHY flags 2802 * 2803 * Converts the given link speed flags into AQ PHY type flag sets appropriate 2804 * for use in a Set PHY Config command. 2805 */ 2806 static void 2807 ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, 2808 u64 *phy_type_high) 2809 { 2810 *phy_type_low = 0, *phy_type_high = 0; 2811 2812 if (sysctl_speeds & ICE_AQ_LINK_SPEED_100MB) 2813 *phy_type_low |= ICE_PHYS_100MB; 2814 if (sysctl_speeds & ICE_AQ_LINK_SPEED_1000MB) 2815 *phy_type_low |= ICE_PHYS_1000MB; 2816 if (sysctl_speeds & ICE_AQ_LINK_SPEED_2500MB) 2817 *phy_type_low |= ICE_PHYS_2500MB; 2818 if (sysctl_speeds & ICE_AQ_LINK_SPEED_5GB) 2819 *phy_type_low |= ICE_PHYS_5GB; 2820 if (sysctl_speeds & ICE_AQ_LINK_SPEED_10GB) 2821 *phy_type_low |= ICE_PHYS_10GB; 2822 if (sysctl_speeds & ICE_AQ_LINK_SPEED_25GB) 2823 *phy_type_low |= ICE_PHYS_25GB; 2824 if (sysctl_speeds & ICE_AQ_LINK_SPEED_40GB) 2825 *phy_type_low |= ICE_PHYS_40GB; 2826 if (sysctl_speeds & ICE_AQ_LINK_SPEED_50GB) 2827 *phy_type_low |= ICE_PHYS_50GB; 2828 if (sysctl_speeds & ICE_AQ_LINK_SPEED_100GB) { 2829 *phy_type_low |= ICE_PHYS_100GB_LOW; 2830 *phy_type_high |= ICE_PHYS_100GB_HIGH; 2831 } 2832 } 2833 2834 /** 2835 * @struct ice_phy_data 2836 * @brief PHY caps and link speeds 2837 * 2838 * Buffer providing report mode and user speeds; 2839 * returning intersection of PHY types and speeds. 
2840 */ 2841 struct ice_phy_data { 2842 u64 phy_low_orig; /* PHY low quad from report */ 2843 u64 phy_high_orig; /* PHY high quad from report */ 2844 u64 phy_low_intr; /* PHY low quad intersection with user speeds */ 2845 u64 phy_high_intr; /* PHY high quad intersection with user speeds */ 2846 u16 user_speeds_orig; /* Input from caller - See ICE_AQ_LINK_SPEED_* */ 2847 u16 user_speeds_intr; /* Intersect with report speeds */ 2848 u8 report_mode; /* See ICE_AQC_REPORT_* */ 2849 }; 2850 2851 /** 2852 * ice_intersect_phy_types_and_speeds - Return intersection of link speeds 2853 * @sc: device private structure 2854 * @phy_data: device PHY data 2855 * 2856 * Using the report mode and user-requested speeds stored in phy_data, query 2857 * the firmware for the port's PHY capabilities and fill phy_data with the 2858 * reported PHY type bitmaps and their intersection with the requested speeds. 2859 */ 2860 static int 2861 ice_intersect_phy_types_and_speeds(struct ice_softc *sc, 2862 struct ice_phy_data *phy_data) 2863 { 2864 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 2865 const char *report_types[5] = { "w/o MEDIA", 2866 "w/MEDIA", 2867 "ACTIVE", 2868 "EDOOFUS", /* Not used */ 2869 "DFLT" }; 2870 struct ice_hw *hw = &sc->hw; 2871 struct ice_port_info *pi = hw->port_info; 2872 enum ice_status status; 2873 u16 report_speeds, temp_speeds; 2874 u8 report_type; 2875 bool apply_speed_filter = false; 2876 2877 switch (phy_data->report_mode) { 2878 case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA: 2879 case ICE_AQC_REPORT_TOPO_CAP_MEDIA: 2880 case ICE_AQC_REPORT_ACTIVE_CFG: 2881 case ICE_AQC_REPORT_DFLT_CFG: 2882 report_type = phy_data->report_mode >> 1; 2883 break; 2884 default: 2885 device_printf(sc->dev, 2886 "%s: phy_data.report_mode \"%u\" doesn't exist\n", 2887 __func__, phy_data->report_mode); 2888 return (EINVAL); 2889 } 2890 2891 /* 0 is treated as "Auto"; the driver will handle selecting the 2892 * correct speeds and, in some cases, will apply an override 2893 * if one is provided.
2894 */ 2895 if (phy_data->user_speeds_orig == 0) 2896 phy_data->user_speeds_orig = USHRT_MAX; 2897 else if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) 2898 apply_speed_filter = true; 2899 2900 status = ice_aq_get_phy_caps(pi, false, phy_data->report_mode, &pcaps, NULL); 2901 if (status != ICE_SUCCESS) { 2902 device_printf(sc->dev, 2903 "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", 2904 __func__, report_types[report_type], 2905 ice_status_str(status), 2906 ice_aq_str(sc->hw.adminq.sq_last_status)); 2907 return (EIO); 2908 } 2909 2910 phy_data->phy_low_orig = le64toh(pcaps.phy_type_low); 2911 phy_data->phy_high_orig = le64toh(pcaps.phy_type_high); 2912 report_speeds = ice_aq_phy_types_to_link_speeds(phy_data->phy_low_orig, 2913 phy_data->phy_high_orig); 2914 if (apply_speed_filter) { 2915 temp_speeds = ice_apply_supported_speed_filter(report_speeds); 2916 if ((phy_data->user_speeds_orig & temp_speeds) == 0) { 2917 device_printf(sc->dev, 2918 "User-specified speeds (\"0x%04X\") not supported\n", 2919 phy_data->user_speeds_orig); 2920 return (EINVAL); 2921 } 2922 report_speeds = temp_speeds; 2923 } 2924 ice_sysctl_speeds_to_aq_phy_types(phy_data->user_speeds_orig, 2925 &phy_data->phy_low_intr, &phy_data->phy_high_intr); 2926 phy_data->user_speeds_intr = phy_data->user_speeds_orig & report_speeds; 2927 phy_data->phy_low_intr &= phy_data->phy_low_orig; 2928 phy_data->phy_high_intr &= phy_data->phy_high_orig; 2929 2930 return (0); 2931 } 2932 2933 /** 2934 * ice_sysctl_advertise_speed - Display/change link speeds supported by port 2935 * @oidp: sysctl oid structure 2936 * @arg1: pointer to private data structure 2937 * @arg2: unused 2938 * @req: sysctl request pointer 2939 * 2940 * On read: Displays the currently supported speeds 2941 * On write: Sets the device's supported speeds 2942 * Valid input flags: see ICE_SYSCTL_HELP_ADVERTISE_SPEED 2943 */ 2944 static int 2945 ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS) 2946 { 2947 struct ice_softc *sc = (struct ice_softc *)arg1; 2948 struct ice_port_info *pi = sc->hw.port_info; 2949 struct ice_phy_data phy_data = { 0 }; 2950 device_t dev = sc->dev; 2951 u16 sysctl_speeds; 2952 int ret; 2953 2954 UNREFERENCED_PARAMETER(arg2); 2955 2956 if (ice_driver_is_detaching(sc)) 2957 return (ESHUTDOWN); 2958 2959 /* Get the current speeds from the adapter's "active" configuration. */ 2960 phy_data.report_mode = ICE_AQC_REPORT_ACTIVE_CFG; 2961 ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); 2962 if (ret) { 2963 /* Error message already printed within function */ 2964 return (ret); 2965 } 2966 2967 sysctl_speeds = phy_data.user_speeds_intr; 2968 2969 ret = sysctl_handle_16(oidp, &sysctl_speeds, 0, req); 2970 if ((ret) || (req->newptr == NULL)) 2971 return (ret); 2972 2973 if (sysctl_speeds > 0x7FF) { 2974 device_printf(dev, 2975 "%s: \"%u\" is outside of the range of acceptable values.\n", 2976 __func__, sysctl_speeds); 2977 return (EINVAL); 2978 } 2979 2980 pi->phy.curr_user_speed_req = sysctl_speeds; 2981 2982 /* Apply settings requested by user */ 2983 return ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS); 2984 } 2985 2986 #define ICE_SYSCTL_HELP_FEC_CONFIG \ 2987 "\nDisplay or set the port's requested FEC mode." \ 2988 "\n\tauto - " ICE_FEC_STRING_AUTO \ 2989 "\n\tfc - " ICE_FEC_STRING_BASER \ 2990 "\n\trs - " ICE_FEC_STRING_RS \ 2991 "\n\tnone - " ICE_FEC_STRING_NONE \ 2992 "\nEither of the left or right strings above can be used to set the requested mode." 
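/*
 * Illustrative example (not part of the driver): the requested_fec sysctl
 * handled below accepts either the short or the long FEC mode strings listed
 * in ICE_SYSCTL_HELP_FEC_CONFIG. Assuming an ice(4) device attached as unit
 * 0, requesting Reed-Solomon FEC and then checking the result could look
 * like:
 *
 *	# sysctl dev.ice.0.requested_fec=rs
 *	# sysctl dev.ice.0.requested_fec
 *	# sysctl dev.ice.0.negotiated_fec
 *
 * The requested mode is cached by the driver and re-applied on subsequent
 * link ups.
 */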
2993 2994 /** 2995 * ice_sysctl_fec_config - Display/change the configured FEC mode 2996 * @oidp: sysctl oid structure 2997 * @arg1: pointer to private data structure 2998 * @arg2: unused 2999 * @req: sysctl request pointer 3000 * 3001 * On read: Displays the configured FEC mode 3002 * On write: Sets the device's FEC mode to the input string, if it's valid. 3003 * Valid input strings: see ICE_SYSCTL_HELP_FEC_CONFIG 3004 */ 3005 static int 3006 ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS) 3007 { 3008 struct ice_softc *sc = (struct ice_softc *)arg1; 3009 struct ice_port_info *pi = sc->hw.port_info; 3010 enum ice_fec_mode new_mode; 3011 device_t dev = sc->dev; 3012 char req_fec[32]; 3013 int ret; 3014 3015 UNREFERENCED_PARAMETER(arg2); 3016 3017 if (ice_driver_is_detaching(sc)) 3018 return (ESHUTDOWN); 3019 3020 bzero(req_fec, sizeof(req_fec)); 3021 strlcpy(req_fec, ice_requested_fec_mode(pi), sizeof(req_fec)); 3022 3023 ret = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req); 3024 if ((ret) || (req->newptr == NULL)) 3025 return (ret); 3026 3027 if (strcmp(req_fec, "auto") == 0 || 3028 strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) { 3029 new_mode = ICE_FEC_AUTO; 3030 } else if (strcmp(req_fec, "fc") == 0 || 3031 strcmp(req_fec, ice_fec_str(ICE_FEC_BASER)) == 0) { 3032 new_mode = ICE_FEC_BASER; 3033 } else if (strcmp(req_fec, "rs") == 0 || 3034 strcmp(req_fec, ice_fec_str(ICE_FEC_RS)) == 0) { 3035 new_mode = ICE_FEC_RS; 3036 } else if (strcmp(req_fec, "none") == 0 || 3037 strcmp(req_fec, ice_fec_str(ICE_FEC_NONE)) == 0) { 3038 new_mode = ICE_FEC_NONE; 3039 } else { 3040 device_printf(dev, 3041 "%s: \"%s\" is not a valid FEC mode\n", 3042 __func__, req_fec); 3043 return (EINVAL); 3044 } 3045 3046 /* Cache user FEC mode for later link ups */ 3047 pi->phy.curr_user_fec_req = new_mode; 3048 3049 /* Apply settings requested by user */ 3050 return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FEC); 3051 } 3052 3053 /** 3054 * ice_sysctl_negotiated_fec - Display the negotiated FEC mode on the link 3055 * @oidp: sysctl oid structure 3056 * @arg1: pointer to private data structure 3057 * @arg2: unused 3058 * @req: sysctl request pointer 3059 * 3060 * On read: Displays the negotiated FEC mode, in a string 3061 */ 3062 static int 3063 ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS) 3064 { 3065 struct ice_softc *sc = (struct ice_softc *)arg1; 3066 struct ice_hw *hw = &sc->hw; 3067 char neg_fec[32]; 3068 int ret; 3069 3070 UNREFERENCED_PARAMETER(arg2); 3071 3072 if (ice_driver_is_detaching(sc)) 3073 return (ESHUTDOWN); 3074 3075 /* Copy const string into a buffer to drop const qualifier */ 3076 bzero(neg_fec, sizeof(neg_fec)); 3077 strlcpy(neg_fec, ice_negotiated_fec_mode(hw->port_info), sizeof(neg_fec)); 3078 3079 ret = sysctl_handle_string(oidp, neg_fec, 0, req); 3080 if (req->newptr != NULL) 3081 return (EPERM); 3082 3083 return (ret); 3084 } 3085 3086 #define ICE_SYSCTL_HELP_FC_CONFIG \ 3087 "\nDisplay or set the port's advertised flow control mode.\n" \ 3088 "\t0 - " ICE_FC_STRING_NONE \ 3089 "\n\t1 - " ICE_FC_STRING_RX \ 3090 "\n\t2 - " ICE_FC_STRING_TX \ 3091 "\n\t3 - " ICE_FC_STRING_FULL \ 3092 "\nEither the numbers or the strings above can be used to set the advertised mode." 
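/*
 * Illustrative example (not part of the driver): the fc sysctl handled below
 * accepts either the numeric values or the mode strings listed in
 * ICE_SYSCTL_HELP_FC_CONFIG. Assuming an ice(4) device attached as unit 0,
 * advertising full (Rx and Tx) flow control with the numeric form could look
 * like:
 *
 *	# sysctl dev.ice.0.fc=3
 *
 * Reading the sysctl back reports the currently advertised mode as the
 * numeric value followed by its string name in angle brackets, matching the
 * "%d<%s>" format used by the handler.
 */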
3093 3094 /** 3095 * ice_sysctl_fc_config - Display/change the advertised flow control mode 3096 * @oidp: sysctl oid structure 3097 * @arg1: pointer to private data structure 3098 * @arg2: unused 3099 * @req: sysctl request pointer 3100 * 3101 * On read: Displays the configured flow control mode 3102 * On write: Sets the device's flow control mode to the input, if it's valid. 3103 * Valid input strings: see ICE_SYSCTL_HELP_FC_CONFIG 3104 */ 3105 static int 3106 ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS) 3107 { 3108 struct ice_softc *sc = (struct ice_softc *)arg1; 3109 struct ice_port_info *pi = sc->hw.port_info; 3110 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3111 enum ice_fc_mode old_mode, new_mode; 3112 struct ice_hw *hw = &sc->hw; 3113 device_t dev = sc->dev; 3114 enum ice_status status; 3115 int ret, fc_num; 3116 bool mode_set = false; 3117 struct sbuf buf; 3118 char *fc_str_end; 3119 char fc_str[32]; 3120 3121 UNREFERENCED_PARAMETER(arg2); 3122 3123 if (ice_driver_is_detaching(sc)) 3124 return (ESHUTDOWN); 3125 3126 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3127 &pcaps, NULL); 3128 if (status != ICE_SUCCESS) { 3129 device_printf(dev, 3130 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3131 __func__, ice_status_str(status), 3132 ice_aq_str(hw->adminq.sq_last_status)); 3133 return (EIO); 3134 } 3135 3136 /* Convert HW response format to SW enum value */ 3137 if ((pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && 3138 (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) 3139 old_mode = ICE_FC_FULL; 3140 else if (pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3141 old_mode = ICE_FC_TX_PAUSE; 3142 else if (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3143 old_mode = ICE_FC_RX_PAUSE; 3144 else 3145 old_mode = ICE_FC_NONE; 3146 3147 /* Create "old" string for output */ 3148 bzero(fc_str, sizeof(fc_str)); 3149 sbuf_new_for_sysctl(&buf, fc_str, sizeof(fc_str), req); 3150 sbuf_printf(&buf, "%d<%s>", old_mode, ice_fc_str(old_mode)); 3151 sbuf_finish(&buf); 3152 sbuf_delete(&buf); 3153 3154 ret = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req); 3155 if ((ret) || (req->newptr == NULL)) 3156 return (ret); 3157 3158 /* Try to parse input as a string, first */ 3159 if (strcasecmp(ice_fc_str(ICE_FC_FULL), fc_str) == 0) { 3160 new_mode = ICE_FC_FULL; 3161 mode_set = true; 3162 } 3163 else if (strcasecmp(ice_fc_str(ICE_FC_TX_PAUSE), fc_str) == 0) { 3164 new_mode = ICE_FC_TX_PAUSE; 3165 mode_set = true; 3166 } 3167 else if (strcasecmp(ice_fc_str(ICE_FC_RX_PAUSE), fc_str) == 0) { 3168 new_mode = ICE_FC_RX_PAUSE; 3169 mode_set = true; 3170 } 3171 else if (strcasecmp(ice_fc_str(ICE_FC_NONE), fc_str) == 0) { 3172 new_mode = ICE_FC_NONE; 3173 mode_set = true; 3174 } 3175 3176 /* 3177 * Then check if it's an integer, for compatibility with the method 3178 * used in older drivers. 
3179 */ 3180 if (!mode_set) { 3181 fc_num = strtol(fc_str, &fc_str_end, 0); 3182 if (fc_str_end == fc_str) 3183 fc_num = -1; 3184 switch (fc_num) { 3185 case 3: 3186 new_mode = ICE_FC_FULL; 3187 break; 3188 case 2: 3189 new_mode = ICE_FC_TX_PAUSE; 3190 break; 3191 case 1: 3192 new_mode = ICE_FC_RX_PAUSE; 3193 break; 3194 case 0: 3195 new_mode = ICE_FC_NONE; 3196 break; 3197 default: 3198 device_printf(dev, 3199 "%s: \"%s\" is not a valid flow control mode\n", 3200 __func__, fc_str); 3201 return (EINVAL); 3202 } 3203 } 3204 3205 /* Set the flow control mode in FW */ 3206 pi->phy.curr_user_fc_req = new_mode; 3207 3208 /* Apply settings requested by user */ 3209 return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FC); 3210 } 3211 3212 /** 3213 * ice_sysctl_negotiated_fc - Display currently negotiated FC mode 3214 * @oidp: sysctl oid structure 3215 * @arg1: pointer to private data structure 3216 * @arg2: unused 3217 * @req: sysctl request pointer 3218 * 3219 * On read: Displays the currently negotiated flow control settings. 3220 * 3221 * If link is not established, this will report ICE_FC_NONE, as no flow 3222 * control is negotiated while link is down. 3223 */ 3224 static int 3225 ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS) 3226 { 3227 struct ice_softc *sc = (struct ice_softc *)arg1; 3228 struct ice_port_info *pi = sc->hw.port_info; 3229 const char *negotiated_fc; 3230 3231 UNREFERENCED_PARAMETER(arg2); 3232 3233 if (ice_driver_is_detaching(sc)) 3234 return (ESHUTDOWN); 3235 3236 negotiated_fc = ice_flowcontrol_mode(pi); 3237 3238 return sysctl_handle_string(oidp, __DECONST(char *, negotiated_fc), 0, req); 3239 } 3240 3241 /** 3242 * __ice_sysctl_phy_type_handler - Display/change supported PHY types/speeds 3243 * @oidp: sysctl oid structure 3244 * @arg1: pointer to private data structure 3245 * @arg2: unused 3246 * @req: sysctl request pointer 3247 * @is_phy_type_high: if true, handle the high PHY type instead of the low PHY type 3248 * 3249 * Private handler for phy_type_high and phy_type_low sysctls. 
3250 */ 3251 static int 3252 __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high) 3253 { 3254 struct ice_softc *sc = (struct ice_softc *)arg1; 3255 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3256 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3257 struct ice_hw *hw = &sc->hw; 3258 device_t dev = sc->dev; 3259 enum ice_status status; 3260 uint64_t types; 3261 int ret; 3262 3263 UNREFERENCED_PARAMETER(arg2); 3264 3265 if (ice_driver_is_detaching(sc)) 3266 return (ESHUTDOWN); 3267 3268 status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_ACTIVE_CFG, 3269 &pcaps, NULL); 3270 if (status != ICE_SUCCESS) { 3271 device_printf(dev, 3272 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3273 __func__, ice_status_str(status), 3274 ice_aq_str(hw->adminq.sq_last_status)); 3275 return (EIO); 3276 } 3277 3278 if (is_phy_type_high) 3279 types = pcaps.phy_type_high; 3280 else 3281 types = pcaps.phy_type_low; 3282 3283 ret = sysctl_handle_64(oidp, &types, sizeof(types), req); 3284 if ((ret) || (req->newptr == NULL)) 3285 return (ret); 3286 3287 ice_copy_phy_caps_to_cfg(hw->port_info, &pcaps, &cfg); 3288 3289 if (is_phy_type_high) 3290 cfg.phy_type_high = types & hw->port_info->phy.phy_type_high; 3291 else 3292 cfg.phy_type_low = types & hw->port_info->phy.phy_type_low; 3293 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3294 3295 status = ice_aq_set_phy_cfg(hw, hw->port_info, &cfg, NULL); 3296 if (status != ICE_SUCCESS) { 3297 device_printf(dev, 3298 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 3299 __func__, ice_status_str(status), 3300 ice_aq_str(hw->adminq.sq_last_status)); 3301 return (EIO); 3302 } 3303 3304 return (0); 3305 3306 } 3307 3308 /** 3309 * ice_sysctl_phy_type_low - Display/change supported lower PHY types/speeds 3310 * @oidp: sysctl oid structure 3311 * @arg1: pointer to private data structure 3312 * @arg2: unused 3313 * @req: sysctl request pointer 3314 * 3315 * On read: Displays the currently supported lower PHY types 3316 * On write: Sets the device's supported low PHY types 3317 */ 3318 static int 3319 ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS) 3320 { 3321 return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, false); 3322 } 3323 3324 /** 3325 * ice_sysctl_phy_type_high - Display/change supported higher PHY types/speeds 3326 * @oidp: sysctl oid structure 3327 * @arg1: pointer to private data structure 3328 * @arg2: unused 3329 * @req: sysctl request pointer 3330 * 3331 * On read: Displays the currently supported higher PHY types 3332 * On write: Sets the device's supported high PHY types 3333 */ 3334 static int 3335 ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS) 3336 { 3337 return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, true); 3338 } 3339 3340 /** 3341 * ice_sysctl_phy_caps - Display response from Get PHY abilities 3342 * @oidp: sysctl oid structure 3343 * @arg1: pointer to private data structure 3344 * @arg2: unused 3345 * @req: sysctl request pointer 3346 * @report_mode: the mode to report 3347 * 3348 * On read: Display the response from Get PHY abilities with the given report 3349 * mode.
3350 */ 3351 static int 3352 ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode) 3353 { 3354 struct ice_softc *sc = (struct ice_softc *)arg1; 3355 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3356 struct ice_hw *hw = &sc->hw; 3357 struct ice_port_info *pi = hw->port_info; 3358 device_t dev = sc->dev; 3359 enum ice_status status; 3360 int ret; 3361 3362 UNREFERENCED_PARAMETER(arg2); 3363 3364 ret = priv_check(curthread, PRIV_DRIVER); 3365 if (ret) 3366 return (ret); 3367 3368 if (ice_driver_is_detaching(sc)) 3369 return (ESHUTDOWN); 3370 3371 status = ice_aq_get_phy_caps(pi, true, report_mode, &pcaps, NULL); 3372 if (status != ICE_SUCCESS) { 3373 device_printf(dev, 3374 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3375 __func__, ice_status_str(status), 3376 ice_aq_str(hw->adminq.sq_last_status)); 3377 return (EIO); 3378 } 3379 3380 ret = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req); 3381 if (req->newptr != NULL) 3382 return (EPERM); 3383 3384 return (ret); 3385 } 3386 3387 /** 3388 * ice_sysctl_phy_sw_caps - Display response from Get PHY abilities 3389 * @oidp: sysctl oid structure 3390 * @arg1: pointer to private data structure 3391 * @arg2: unused 3392 * @req: sysctl request pointer 3393 * 3394 * On read: Display the response from Get PHY abilities reporting the last 3395 * software configuration. 3396 */ 3397 static int 3398 ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS) 3399 { 3400 return ice_sysctl_phy_caps(oidp, arg1, arg2, req, 3401 ICE_AQC_REPORT_ACTIVE_CFG); 3402 } 3403 3404 /** 3405 * ice_sysctl_phy_nvm_caps - Display response from Get PHY abilities 3406 * @oidp: sysctl oid structure 3407 * @arg1: pointer to private data structure 3408 * @arg2: unused 3409 * @req: sysctl request pointer 3410 * 3411 * On read: Display the response from Get PHY abilities reporting the NVM 3412 * configuration. 3413 */ 3414 static int 3415 ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS) 3416 { 3417 return ice_sysctl_phy_caps(oidp, arg1, arg2, req, 3418 ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA); 3419 } 3420 3421 /** 3422 * ice_sysctl_phy_topo_caps - Display response from Get PHY abilities 3423 * @oidp: sysctl oid structure 3424 * @arg1: pointer to private data structure 3425 * @arg2: unused 3426 * @req: sysctl request pointer 3427 * 3428 * On read: Display the response from Get PHY abilities reporting the 3429 * topology configuration. 3430 */ 3431 static int 3432 ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS) 3433 { 3434 return ice_sysctl_phy_caps(oidp, arg1, arg2, req, 3435 ICE_AQC_REPORT_TOPO_CAP_MEDIA); 3436 } 3437 3438 /** 3439 * ice_sysctl_phy_link_status - Display response from Get Link Status 3440 * @oidp: sysctl oid structure 3441 * @arg1: pointer to private data structure 3442 * @arg2: unused 3443 * @req: sysctl request pointer 3444 * 3445 * On read: Display the response from firmware for the Get Link Status 3446 * request.
3447 */ 3448 static int 3449 ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS) 3450 { 3451 struct ice_aqc_get_link_status_data link_data = { 0 }; 3452 struct ice_softc *sc = (struct ice_softc *)arg1; 3453 struct ice_hw *hw = &sc->hw; 3454 struct ice_port_info *pi = hw->port_info; 3455 struct ice_aqc_get_link_status *resp; 3456 struct ice_aq_desc desc; 3457 device_t dev = sc->dev; 3458 enum ice_status status; 3459 int ret; 3460 3461 UNREFERENCED_PARAMETER(arg2); 3462 3463 /* 3464 * Ensure that only contexts with driver privilege are allowed to 3465 * access this information 3466 */ 3467 ret = priv_check(curthread, PRIV_DRIVER); 3468 if (ret) 3469 return (ret); 3470 3471 if (ice_driver_is_detaching(sc)) 3472 return (ESHUTDOWN); 3473 3474 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); 3475 resp = &desc.params.get_link_status; 3476 resp->lport_num = pi->lport; 3477 3478 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), NULL); 3479 if (status != ICE_SUCCESS) { 3480 device_printf(dev, 3481 "%s: ice_aq_send_cmd failed; status %s, aq_err %s\n", 3482 __func__, ice_status_str(status), 3483 ice_aq_str(hw->adminq.sq_last_status)); 3484 return (EIO); 3485 } 3486 3487 ret = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req); 3488 if (req->newptr != NULL) 3489 return (EPERM); 3490 3491 return (ret); 3492 } 3493 3494 /** 3495 * ice_sysctl_fw_cur_lldp_persist_status - Display current FW LLDP status 3496 * @oidp: sysctl oid structure 3497 * @arg1: pointer to private softc structure 3498 * @arg2: unused 3499 * @req: sysctl request pointer 3500 * 3501 * On read: Displays current persistent LLDP status. 3502 */ 3503 static int 3504 ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS) 3505 { 3506 struct ice_softc *sc = (struct ice_softc *)arg1; 3507 struct ice_hw *hw = &sc->hw; 3508 device_t dev = sc->dev; 3509 enum ice_status status; 3510 struct sbuf *sbuf; 3511 u32 lldp_state; 3512 3513 UNREFERENCED_PARAMETER(arg2); 3514 UNREFERENCED_PARAMETER(oidp); 3515 3516 if (ice_driver_is_detaching(sc)) 3517 return (ESHUTDOWN); 3518 3519 status = ice_get_cur_lldp_persist_status(hw, &lldp_state); 3520 if (status) { 3521 device_printf(dev, 3522 "Could not acquire current LLDP persistence status, err %s aq_err %s\n", 3523 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3524 return (EIO); 3525 } 3526 3527 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3528 sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); 3529 sbuf_finish(sbuf); 3530 sbuf_delete(sbuf); 3531 3532 return (0); 3533 } 3534 3535 /** 3536 * ice_sysctl_fw_dflt_lldp_persist_status - Display default FW LLDP status 3537 * @oidp: sysctl oid structure 3538 * @arg1: pointer to private softc structure 3539 * @arg2: unused 3540 * @req: sysctl request pointer 3541 * 3542 * On read: Displays default persistent LLDP status. 
3543 */ 3544 static int 3545 ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS) 3546 { 3547 struct ice_softc *sc = (struct ice_softc *)arg1; 3548 struct ice_hw *hw = &sc->hw; 3549 device_t dev = sc->dev; 3550 enum ice_status status; 3551 struct sbuf *sbuf; 3552 u32 lldp_state; 3553 3554 UNREFERENCED_PARAMETER(arg2); 3555 UNREFERENCED_PARAMETER(oidp); 3556 3557 if (ice_driver_is_detaching(sc)) 3558 return (ESHUTDOWN); 3559 3560 status = ice_get_dflt_lldp_persist_status(hw, &lldp_state); 3561 if (status) { 3562 device_printf(dev, 3563 "Could not acquire default LLDP persistence status, err %s aq_err %s\n", 3564 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3565 return (EIO); 3566 } 3567 3568 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3569 sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); 3570 sbuf_finish(sbuf); 3571 sbuf_delete(sbuf); 3572 3573 return (0); 3574 } 3575 3576 #define ICE_SYSCTL_HELP_FW_LLDP_AGENT \ 3577 "\nDisplay or change FW LLDP agent state:" \ 3578 "\n\t0 - disabled" \ 3579 "\n\t1 - enabled" 3580 3581 /** 3582 * ice_sysctl_fw_lldp_agent - Display or change the FW LLDP agent status 3583 * @oidp: sysctl oid structure 3584 * @arg1: pointer to private softc structure 3585 * @arg2: unused 3586 * @req: sysctl request pointer 3587 * 3588 * On read: Displays whether the FW LLDP agent is running 3589 * On write: Persistently enables or disables the FW LLDP agent 3590 */ 3591 static int 3592 ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS) 3593 { 3594 struct ice_softc *sc = (struct ice_softc *)arg1; 3595 struct ice_hw *hw = &sc->hw; 3596 device_t dev = sc->dev; 3597 enum ice_status status; 3598 int ret; 3599 u32 old_state; 3600 u8 fw_lldp_enabled; 3601 bool retried_start_lldp = false; 3602 3603 UNREFERENCED_PARAMETER(arg2); 3604 3605 if (ice_driver_is_detaching(sc)) 3606 return (ESHUTDOWN); 3607 3608 status = ice_get_cur_lldp_persist_status(hw, &old_state); 3609 if (status) { 3610 device_printf(dev, 3611 "Could not acquire current LLDP persistence status, err %s aq_err %s\n", 3612 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3613 return (EIO); 3614 } 3615 3616 if (old_state > ICE_LLDP_ADMINSTATUS_ENA_RXTX) { 3617 status = ice_get_dflt_lldp_persist_status(hw, &old_state); 3618 if (status) { 3619 device_printf(dev, 3620 "Could not acquire default LLDP persistence status, err %s aq_err %s\n", 3621 ice_status_str(status), 3622 ice_aq_str(hw->adminq.sq_last_status)); 3623 return (EIO); 3624 } 3625 } 3626 if (old_state == 0) 3627 fw_lldp_enabled = false; 3628 else 3629 fw_lldp_enabled = true; 3630 3631 ret = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req); 3632 if ((ret) || (req->newptr == NULL)) 3633 return (ret); 3634 3635 if (old_state == 0 && fw_lldp_enabled == false) 3636 return (0); 3637 3638 if (old_state != 0 && fw_lldp_enabled == true) 3639 return (0); 3640 3641 if (fw_lldp_enabled == false) { 3642 status = ice_aq_stop_lldp(hw, true, true, NULL); 3643 /* EPERM is returned if the LLDP agent is already shutdown */ 3644 if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM) { 3645 device_printf(dev, 3646 "%s: ice_aq_stop_lldp failed; status %s, aq_err %s\n", 3647 __func__, ice_status_str(status), 3648 ice_aq_str(hw->adminq.sq_last_status)); 3649 return (EIO); 3650 } 3651 ice_aq_set_dcb_parameters(hw, true, NULL); 3652 hw->port_info->qos_cfg.is_sw_lldp = true; 3653 ice_add_rx_lldp_filter(sc); 3654 } else { 3655 ice_del_rx_lldp_filter(sc); 3656 retry_start_lldp: 3657 status = ice_aq_start_lldp(hw, true, NULL); 3658 if 
(status) { 3659 switch (hw->adminq.sq_last_status) { 3660 /* EEXIST is returned if the LLDP agent is already started */ 3661 case ICE_AQ_RC_EEXIST: 3662 break; 3663 case ICE_AQ_RC_EAGAIN: 3664 /* Retry command after a 2 second wait */ 3665 if (retried_start_lldp == false) { 3666 retried_start_lldp = true; 3667 pause("slldp", ICE_START_LLDP_RETRY_WAIT); 3668 goto retry_start_lldp; 3669 } 3670 /* Fallthrough */ 3671 default: 3672 device_printf(dev, 3673 "%s: ice_aq_start_lldp failed; status %s, aq_err %s\n", 3674 __func__, ice_status_str(status), 3675 ice_aq_str(hw->adminq.sq_last_status)); 3676 return (EIO); 3677 } 3678 } 3679 hw->port_info->qos_cfg.is_sw_lldp = false; 3680 } 3681 3682 return (ret); 3683 } 3684 3685 /** 3686 * ice_add_device_sysctls - add device specific dynamic sysctls 3687 * @sc: device private structure 3688 * 3689 * Add per-device dynamic sysctls which show device configuration or enable 3690 * configuring device functionality. For tunable values which can be set prior 3691 * to load, see ice_add_device_tunables. 3692 * 3693 * This function depends on the sysctl layout setup by ice_add_device_tunables, 3694 * and likely should be called near the end of the attach process. 3695 */ 3696 void 3697 ice_add_device_sysctls(struct ice_softc *sc) 3698 { 3699 struct sysctl_oid *hw_node; 3700 device_t dev = sc->dev; 3701 3702 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 3703 struct sysctl_oid_list *ctx_list = 3704 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 3705 3706 SYSCTL_ADD_PROC(ctx, ctx_list, 3707 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, 3708 sc, 0, ice_sysctl_show_fw, "A", "Firmware version"); 3709 3710 SYSCTL_ADD_PROC(ctx, ctx_list, 3711 OID_AUTO, "pba_number", CTLTYPE_STRING | CTLFLAG_RD, 3712 sc, 0, ice_sysctl_pba_number, "A", "Product Board Assembly Number"); 3713 3714 SYSCTL_ADD_PROC(ctx, ctx_list, 3715 OID_AUTO, "ddp_version", CTLTYPE_STRING | CTLFLAG_RD, 3716 sc, 0, ice_sysctl_pkg_version, "A", "Active DDP package name and version"); 3717 3718 SYSCTL_ADD_PROC(ctx, ctx_list, 3719 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, 3720 sc, 0, ice_sysctl_current_speed, "A", "Current Port Link Speed"); 3721 3722 SYSCTL_ADD_PROC(ctx, ctx_list, 3723 OID_AUTO, "requested_fec", CTLTYPE_STRING | CTLFLAG_RW, 3724 sc, 0, ice_sysctl_fec_config, "A", ICE_SYSCTL_HELP_FEC_CONFIG); 3725 3726 SYSCTL_ADD_PROC(ctx, ctx_list, 3727 OID_AUTO, "negotiated_fec", CTLTYPE_STRING | CTLFLAG_RD, 3728 sc, 0, ice_sysctl_negotiated_fec, "A", "Current Negotiated FEC mode"); 3729 3730 SYSCTL_ADD_PROC(ctx, ctx_list, 3731 OID_AUTO, "fc", CTLTYPE_STRING | CTLFLAG_RW, 3732 sc, 0, ice_sysctl_fc_config, "A", ICE_SYSCTL_HELP_FC_CONFIG); 3733 3734 SYSCTL_ADD_PROC(ctx, ctx_list, 3735 OID_AUTO, "advertise_speed", CTLTYPE_U16 | CTLFLAG_RW, 3736 sc, 0, ice_sysctl_advertise_speed, "SU", ICE_SYSCTL_HELP_ADVERTISE_SPEED); 3737 3738 SYSCTL_ADD_PROC(ctx, ctx_list, 3739 OID_AUTO, "fw_lldp_agent", CTLTYPE_U8 | CTLFLAG_RWTUN, 3740 sc, 0, ice_sysctl_fw_lldp_agent, "CU", ICE_SYSCTL_HELP_FW_LLDP_AGENT); 3741 3742 /* Differentiate software and hardware statistics, by keeping hw stats 3743 * in their own node. This isn't in ice_add_device_tunables, because 3744 * we won't have any CTLFLAG_TUN sysctls under this node. 3745 */ 3746 hw_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "hw", CTLFLAG_RD, 3747 NULL, "Port Hardware Statistics"); 3748 3749 ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur); 3750 3751 /* Add the main PF VSI stats now. 
Other VSIs will add their own stats 3752 * during creation 3753 */ 3754 ice_add_vsi_sysctls(&sc->pf_vsi); 3755 3756 /* Add sysctls related to debugging the device driver. This includes 3757 * sysctls which display additional internal driver state for use in 3758 * understanding what is happening within the driver. 3759 */ 3760 ice_add_debug_sysctls(sc); 3761 } 3762 3763 /** 3764 * @enum hmc_error_type 3765 * @brief enumeration of HMC errors 3766 * 3767 * Enumeration defining the possible HMC errors that might occur. 3768 */ 3769 enum hmc_error_type { 3770 HMC_ERR_PMF_INVALID = 0, 3771 HMC_ERR_VF_IDX_INVALID = 1, 3772 HMC_ERR_VF_PARENT_PF_INVALID = 2, 3773 /* 3 is reserved */ 3774 HMC_ERR_INDEX_TOO_BIG = 4, 3775 HMC_ERR_ADDRESS_TOO_LARGE = 5, 3776 HMC_ERR_SEGMENT_DESC_INVALID = 6, 3777 HMC_ERR_SEGMENT_DESC_TOO_SMALL = 7, 3778 HMC_ERR_PAGE_DESC_INVALID = 8, 3779 HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION = 9, 3780 /* 10 is reserved */ 3781 HMC_ERR_INVALID_OBJECT_TYPE = 11, 3782 /* 12 is reserved */ 3783 }; 3784 3785 /** 3786 * ice_log_hmc_error - Log an HMC error message 3787 * @hw: device hw structure 3788 * @dev: the device to pass to device_printf() 3789 * 3790 * Log a message when an HMC error interrupt is triggered. 3791 */ 3792 void 3793 ice_log_hmc_error(struct ice_hw *hw, device_t dev) 3794 { 3795 u32 info, data; 3796 u8 index, errtype, objtype; 3797 bool isvf; 3798 3799 info = rd32(hw, PFHMC_ERRORINFO); 3800 data = rd32(hw, PFHMC_ERRORDATA); 3801 3802 index = (u8)(info & PFHMC_ERRORINFO_PMF_INDEX_M); 3803 errtype = (u8)((info & PFHMC_ERRORINFO_HMC_ERROR_TYPE_M) >> 3804 PFHMC_ERRORINFO_HMC_ERROR_TYPE_S); 3805 objtype = (u8)((info & PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M) >> 3806 PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S); 3807 3808 isvf = info & PFHMC_ERRORINFO_PMF_ISVF_M; 3809 3810 device_printf(dev, "%s HMC Error detected on PMF index %d:\n", 3811 isvf ? "VF" : "PF", index); 3812 3813 device_printf(dev, "error type %d, object type %d, data 0x%08x\n", 3814 errtype, objtype, data); 3815 3816 switch (errtype) { 3817 case HMC_ERR_PMF_INVALID: 3818 device_printf(dev, "Private Memory Function is not valid\n"); 3819 break; 3820 case HMC_ERR_VF_IDX_INVALID: 3821 device_printf(dev, "Invalid Private Memory Function index for PE enabled VF\n"); 3822 break; 3823 case HMC_ERR_VF_PARENT_PF_INVALID: 3824 device_printf(dev, "Invalid parent PF for PE enabled VF\n"); 3825 break; 3826 case HMC_ERR_INDEX_TOO_BIG: 3827 device_printf(dev, "Object index too big\n"); 3828 break; 3829 case HMC_ERR_ADDRESS_TOO_LARGE: 3830 device_printf(dev, "Address extends beyond segment descriptor limit\n"); 3831 break; 3832 case HMC_ERR_SEGMENT_DESC_INVALID: 3833 device_printf(dev, "Segment descriptor is invalid\n"); 3834 break; 3835 case HMC_ERR_SEGMENT_DESC_TOO_SMALL: 3836 device_printf(dev, "Segment descriptor is too small\n"); 3837 break; 3838 case HMC_ERR_PAGE_DESC_INVALID: 3839 device_printf(dev, "Page descriptor is invalid\n"); 3840 break; 3841 case HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION: 3842 device_printf(dev, "Unsupported Request completion received from PCIe\n"); 3843 break; 3844 case HMC_ERR_INVALID_OBJECT_TYPE: 3845 device_printf(dev, "Invalid object type\n"); 3846 break; 3847 default: 3848 device_printf(dev, "Unknown HMC error\n"); 3849 } 3850 3851 /* Clear the error indication */ 3852 wr32(hw, PFHMC_ERRORINFO, 0); 3853 } 3854 3855 /** 3856 * @struct ice_sysctl_info 3857 * @brief sysctl information 3858 * 3859 * Structure used to simplify the process of defining the many similar 3860 * statistics sysctls. 
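 *
 * For example, the following entry (taken from the table in
 * ice_add_sysctls_eth_stats below) creates a read-only 64-bit sysctl named
 * "good_octets_rcvd" under the chosen parent node, backed directly by the
 * rx_bytes counter:
 *
 *	{ &stats->rx_bytes, "good_octets_rcvd", "Good Octets Received" }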
3861 */ 3862 struct ice_sysctl_info { 3863 u64 *stat; 3864 const char *name; 3865 const char *description; 3866 }; 3867 3868 /** 3869 * ice_add_sysctls_eth_stats - Add sysctls for ethernet statistics 3870 * @ctx: sysctl ctx to use 3871 * @parent: the parent node to add sysctls under 3872 * @stats: the ethernet stats structure to source values from 3873 * 3874 * Adds statistics sysctls for the ethernet statistics of the MAC or a VSI. 3875 * Will add them under the parent node specified. 3876 * 3877 * Note that tx_errors is only meaningful for VSIs and not the global MAC/PF 3878 * statistics, so it is not included here. Similarly, rx_discards has different 3879 * descriptions for VSIs and MAC/PF stats, so it is also not included here. 3880 */ 3881 void 3882 ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, 3883 struct sysctl_oid *parent, 3884 struct ice_eth_stats *stats) 3885 { 3886 const struct ice_sysctl_info ctls[] = { 3887 /* Rx Stats */ 3888 { &stats->rx_bytes, "good_octets_rcvd", "Good Octets Received" }, 3889 { &stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received" }, 3890 { &stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received" }, 3891 { &stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received" }, 3892 /* Tx Stats */ 3893 { &stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted" }, 3894 { &stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted" }, 3895 { &stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted" }, 3896 { &stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted" }, 3897 /* End */ 3898 { 0, 0, 0 } 3899 }; 3900 3901 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 3902 3903 const struct ice_sysctl_info *entry = ctls; 3904 while (entry->stat != 0) { 3905 SYSCTL_ADD_U64(ctx, parent_list, OID_AUTO, entry->name, 3906 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 3907 entry->description); 3908 entry++; 3909 } 3910 } 3911 3912 /** 3913 * ice_sysctl_tx_cso_stat - Display Tx checksum offload statistic 3914 * @oidp: sysctl oid structure 3915 * @arg1: pointer to private data structure 3916 * @arg2: Tx CSO stat to read 3917 * @req: sysctl request pointer 3918 * 3919 * On read: Sums the per-queue Tx CSO stat and displays it. 3920 */ 3921 static int 3922 ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS) 3923 { 3924 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 3925 enum ice_tx_cso_stat type = (enum ice_tx_cso_stat)arg2; 3926 u64 stat = 0; 3927 int i; 3928 3929 if (ice_driver_is_detaching(vsi->sc)) 3930 return (ESHUTDOWN); 3931 3932 /* Check that the type is valid */ 3933 if (type >= ICE_CSO_STAT_TX_COUNT) 3934 return (EDOOFUS); 3935 3936 /* Sum the stat for each of the Tx queues */ 3937 for (i = 0; i < vsi->num_tx_queues; i++) 3938 stat += vsi->tx_queues[i].stats.cso[type]; 3939 3940 return sysctl_handle_64(oidp, NULL, stat, req); 3941 } 3942 3943 /** 3944 * ice_sysctl_rx_cso_stat - Display Rx checksum offload statistic 3945 * @oidp: sysctl oid structure 3946 * @arg1: pointer to private data structure 3947 * @arg2: Rx CSO stat to read 3948 * @req: sysctl request pointer 3949 * 3950 * On read: Sums the per-queue Rx CSO stat and displays it. 
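 *
 * For example, if the VSI has four Rx queues and each queue has counted 10
 * packets for the requested statistic, the sysctl reports a total of 40.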
3951 */ 3952 static int 3953 ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS) 3954 { 3955 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 3956 enum ice_rx_cso_stat type = (enum ice_rx_cso_stat)arg2; 3957 u64 stat = 0; 3958 int i; 3959 3960 if (ice_driver_is_detaching(vsi->sc)) 3961 return (ESHUTDOWN); 3962 3963 /* Check that the type is valid */ 3964 if (type >= ICE_CSO_STAT_RX_COUNT) 3965 return (EDOOFUS); 3966 3967 /* Sum the stat for each of the Rx queues */ 3968 for (i = 0; i < vsi->num_rx_queues; i++) 3969 stat += vsi->rx_queues[i].stats.cso[type]; 3970 3971 return sysctl_handle_64(oidp, NULL, stat, req); 3972 } 3973 3974 /** 3975 * ice_sysctl_rx_errors_stat - Display aggregate of Rx errors 3976 * @oidp: sysctl oid structure 3977 * @arg1: pointer to private data structure 3978 * @arg2: unused 3979 * @req: sysctl request pointer 3980 * 3981 * On read: Sums current values of Rx error statistics and 3982 * displays it. 3983 */ 3984 static int 3985 ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS) 3986 { 3987 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 3988 struct ice_hw_port_stats *hs = &vsi->sc->stats.cur; 3989 u64 stat = 0; 3990 int i, type; 3991 3992 UNREFERENCED_PARAMETER(arg2); 3993 3994 if (ice_driver_is_detaching(vsi->sc)) 3995 return (ESHUTDOWN); 3996 3997 stat += hs->rx_undersize; 3998 stat += hs->rx_fragments; 3999 stat += hs->rx_oversize; 4000 stat += hs->rx_jabber; 4001 stat += hs->rx_len_errors; 4002 stat += hs->crc_errors; 4003 stat += hs->illegal_bytes; 4004 4005 /* Checksum error stats */ 4006 for (i = 0; i < vsi->num_rx_queues; i++) 4007 for (type = ICE_CSO_STAT_RX_IP4_ERR; 4008 type < ICE_CSO_STAT_RX_COUNT; 4009 type++) 4010 stat += vsi->rx_queues[i].stats.cso[type]; 4011 4012 return sysctl_handle_64(oidp, NULL, stat, req); 4013 } 4014 4015 /** 4016 * @struct ice_rx_cso_stat_info 4017 * @brief sysctl information for an Rx checksum offload statistic 4018 * 4019 * Structure used to simplify the process of defining the checksum offload 4020 * statistics. 4021 */ 4022 struct ice_rx_cso_stat_info { 4023 enum ice_rx_cso_stat type; 4024 const char *name; 4025 const char *description; 4026 }; 4027 4028 /** 4029 * @struct ice_tx_cso_stat_info 4030 * @brief sysctl information for a Tx checksum offload statistic 4031 * 4032 * Structure used to simplify the process of defining the checksum offload 4033 * statistics. 4034 */ 4035 struct ice_tx_cso_stat_info { 4036 enum ice_tx_cso_stat type; 4037 const char *name; 4038 const char *description; 4039 }; 4040 4041 /** 4042 * ice_add_sysctls_sw_stats - Add sysctls for software statistics 4043 * @vsi: pointer to the VSI to add sysctls for 4044 * @ctx: sysctl ctx to use 4045 * @parent: the parent node to add sysctls under 4046 * 4047 * Add statistics sysctls for software tracked statistics of a VSI. 4048 * 4049 * Currently this only adds checksum offload statistics, but more counters may 4050 * be added in the future. 
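 *
 * A sketch of the resulting OID layout, assuming a hypothetical PF unit 0 and
 * VSI index 0, once the "cso" node below is attached under the VSI "sw" node:
 *
 *	dev.ice.0.vsi.0.sw.cso.tx_tcp
 *	dev.ice.0.vsi.0.sw.cso.rx_udp_err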
4051 */ 4052 static void 4053 ice_add_sysctls_sw_stats(struct ice_vsi *vsi, 4054 struct sysctl_ctx_list *ctx, 4055 struct sysctl_oid *parent) 4056 { 4057 struct sysctl_oid *cso_node; 4058 struct sysctl_oid_list *cso_list; 4059 4060 /* Tx CSO Stats */ 4061 const struct ice_tx_cso_stat_info tx_ctls[] = { 4062 { ICE_CSO_STAT_TX_TCP, "tx_tcp", "Transmit TCP Packets marked for HW checksum" }, 4063 { ICE_CSO_STAT_TX_UDP, "tx_udp", "Transmit UDP Packets marked for HW checksum" }, 4064 { ICE_CSO_STAT_TX_SCTP, "tx_sctp", "Transmit SCTP Packets marked for HW checksum" }, 4065 { ICE_CSO_STAT_TX_IP4, "tx_ip4", "Transmit IPv4 Packets marked for HW checksum" }, 4066 { ICE_CSO_STAT_TX_IP6, "tx_ip6", "Transmit IPv6 Packets marked for HW checksum" }, 4067 { ICE_CSO_STAT_TX_L3_ERR, "tx_l3_err", "Transmit packets that driver failed to set L3 HW CSO bits for" }, 4068 { ICE_CSO_STAT_TX_L4_ERR, "tx_l4_err", "Transmit packets that driver failed to set L4 HW CSO bits for" }, 4069 /* End */ 4070 { ICE_CSO_STAT_TX_COUNT, 0, 0 } 4071 }; 4072 4073 /* Rx CSO Stats */ 4074 const struct ice_rx_cso_stat_info rx_ctls[] = { 4075 { ICE_CSO_STAT_RX_IP4_ERR, "rx_ip4_err", "Received packets with invalid IPv4 checksum indicated by HW" }, 4076 { ICE_CSO_STAT_RX_IP6_ERR, "rx_ip6_err", "Received IPv6 packets with extension headers" }, 4077 { ICE_CSO_STAT_RX_L3_ERR, "rx_l3_err", "Received packets with an unexpected invalid L3 checksum indicated by HW" }, 4078 { ICE_CSO_STAT_RX_TCP_ERR, "rx_tcp_err", "Received packets with invalid TCP checksum indicated by HW" }, 4079 { ICE_CSO_STAT_RX_UDP_ERR, "rx_udp_err", "Received packets with invalid UDP checksum indicated by HW" }, 4080 { ICE_CSO_STAT_RX_SCTP_ERR, "rx_sctp_err", "Received packets with invalid SCTP checksum indicated by HW" }, 4081 { ICE_CSO_STAT_RX_L4_ERR, "rx_l4_err", "Received packets with an unexpected invalid L4 checksum indicated by HW" }, 4082 /* End */ 4083 { ICE_CSO_STAT_RX_COUNT, 0, 0 } 4084 }; 4085 4086 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4087 4088 /* Add a node for statistics tracked by software. */ 4089 cso_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "cso", CTLFLAG_RD, 4090 NULL, "Checksum offload Statistics"); 4091 cso_list = SYSCTL_CHILDREN(cso_node); 4092 4093 const struct ice_tx_cso_stat_info *tx_entry = tx_ctls; 4094 while (tx_entry->name && tx_entry->description) { 4095 SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, tx_entry->name, 4096 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, 4097 vsi, tx_entry->type, ice_sysctl_tx_cso_stat, "QU", 4098 tx_entry->description); 4099 tx_entry++; 4100 } 4101 4102 const struct ice_rx_cso_stat_info *rx_entry = rx_ctls; 4103 while (rx_entry->name && rx_entry->description) { 4104 SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, rx_entry->name, 4105 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, 4106 vsi, rx_entry->type, ice_sysctl_rx_cso_stat, "QU", 4107 rx_entry->description); 4108 rx_entry++; 4109 } 4110 } 4111 4112 /** 4113 * ice_add_vsi_sysctls - Add sysctls for a VSI 4114 * @vsi: pointer to VSI structure 4115 * 4116 * Add various sysctls for a given VSI. 4117 */ 4118 void 4119 ice_add_vsi_sysctls(struct ice_vsi *vsi) 4120 { 4121 struct sysctl_ctx_list *ctx = &vsi->ctx; 4122 struct sysctl_oid *hw_node, *sw_node; 4123 struct sysctl_oid_list *vsi_list, *hw_list, *sw_list; 4124 4125 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 4126 4127 /* Keep hw stats in their own node. 
*/ 4128 hw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "hw", CTLFLAG_RD, 4129 NULL, "VSI Hardware Statistics"); 4130 hw_list = SYSCTL_CHILDREN(hw_node); 4131 4132 /* Add the ethernet statistics for this VSI */ 4133 ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur); 4134 4135 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_discards", 4136 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards, 4137 0, "Discarded Rx Packets (see rx_errors or rx_no_desc)"); 4138 4139 SYSCTL_ADD_PROC(ctx, hw_list, OID_AUTO, "rx_errors", 4140 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, 4141 vsi, 0, ice_sysctl_rx_errors_stat, "QU", 4142 "Aggregate of all Rx errors"); 4143 4144 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_no_desc", 4145 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_no_desc, 4146 0, "Rx Packets Discarded Due To Lack Of Descriptors"); 4147 4148 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "tx_errors", 4149 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.tx_errors, 4150 0, "Tx Packets Discarded Due To Error"); 4151 4152 /* Add a node for statistics tracked by software. */ 4153 sw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "sw", CTLFLAG_RD, 4154 NULL, "VSI Software Statistics"); 4155 sw_list = SYSCTL_CHILDREN(sw_node); 4156 4157 ice_add_sysctls_sw_stats(vsi, ctx, sw_node); 4158 } 4159 4160 /** 4161 * ice_add_sysctls_mac_stats - Add sysctls for global MAC statistics 4162 * @ctx: the sysctl ctx to use 4163 * @parent: parent node to add the sysctls under 4164 * @stats: the hw ports stat structure to pull values from 4165 * 4166 * Add global MAC statistics sysctls. 4167 */ 4168 void 4169 ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, 4170 struct sysctl_oid *parent, 4171 struct ice_hw_port_stats *stats) 4172 { 4173 struct sysctl_oid *mac_node; 4174 struct sysctl_oid_list *parent_list, *mac_list; 4175 4176 parent_list = SYSCTL_CHILDREN(parent); 4177 4178 mac_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "mac", CTLFLAG_RD, 4179 NULL, "Mac Hardware Statistics"); 4180 mac_list = SYSCTL_CHILDREN(mac_node); 4181 4182 /* add the common ethernet statistics */ 4183 ice_add_sysctls_eth_stats(ctx, mac_node, &stats->eth); 4184 4185 const struct ice_sysctl_info ctls[] = { 4186 /* Packet Reception Stats */ 4187 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, 4188 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, 4189 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, 4190 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, 4191 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, 4192 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, 4193 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, 4194 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, 4195 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, 4196 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, 4197 {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, 4198 {&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"}, 4199 {&stats->eth.rx_discards, "rx_discards", 4200 "Discarded Rx Packets by Port (shortage of storage space)"}, 4201 /* Packet Transmission Stats */ 4202 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, 4203 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, 4204 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, 4205 
{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, 4206 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, 4207 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, 4208 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, 4209 {&stats->tx_dropped_link_down, "tx_dropped", "Tx Dropped Due To Link Down"}, 4210 /* Flow control */ 4211 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, 4212 {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, 4213 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, 4214 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, 4215 /* Other */ 4216 {&stats->crc_errors, "crc_errors", "CRC Errors"}, 4217 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, 4218 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, 4219 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, 4220 /* End */ 4221 { 0, 0, 0 } 4222 }; 4223 4224 const struct ice_sysctl_info *entry = ctls; 4225 while (entry->stat != 0) { 4226 SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, entry->name, 4227 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 4228 entry->description); 4229 entry++; 4230 } 4231 } 4232 4233 /** 4234 * ice_configure_misc_interrupts - enable 'other' interrupt causes 4235 * @sc: pointer to device private softc 4236 * 4237 * Enable various "other" interrupt causes, and associate them to interrupt 0, 4238 * which is our administrative interrupt. 4239 */ 4240 void 4241 ice_configure_misc_interrupts(struct ice_softc *sc) 4242 { 4243 struct ice_hw *hw = &sc->hw; 4244 u32 val; 4245 4246 /* Read the OICR register to clear it */ 4247 rd32(hw, PFINT_OICR); 4248 4249 /* Enable useful "other" interrupt causes */ 4250 val = (PFINT_OICR_ECC_ERR_M | 4251 PFINT_OICR_MAL_DETECT_M | 4252 PFINT_OICR_GRST_M | 4253 PFINT_OICR_PCI_EXCEPTION_M | 4254 PFINT_OICR_VFLR_M | 4255 PFINT_OICR_HMC_ERR_M | 4256 PFINT_OICR_PE_CRITERR_M); 4257 4258 wr32(hw, PFINT_OICR_ENA, val); 4259 4260 /* Note that since we're using MSI-X index 0, and ITR index 0, we do 4261 * not explicitly program them when writing to the PFINT_*_CTL 4262 * registers. Nevertheless, these writes are associating the 4263 * interrupts with the ITR 0 vector 4264 */ 4265 4266 /* Associate the OICR interrupt with ITR 0, and enable it */ 4267 wr32(hw, PFINT_OICR_CTL, PFINT_OICR_CTL_CAUSE_ENA_M); 4268 4269 /* Associate the Mailbox interrupt with ITR 0, and enable it */ 4270 wr32(hw, PFINT_MBX_CTL, PFINT_MBX_CTL_CAUSE_ENA_M); 4271 4272 /* Associate the AdminQ interrupt with ITR 0, and enable it */ 4273 wr32(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M); 4274 } 4275 4276 /** 4277 * ice_filter_is_mcast - Check if info is a multicast filter 4278 * @vsi: vsi structure addresses are targeted towards 4279 * @info: filter info 4280 * 4281 * @returns true if the provided info is a multicast filter, and false 4282 * otherwise. 
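 *
 * For example, a Tx, VSI-sourced MAC lookup filter for the multicast address
 * 01:00:5e:00:00:01 added by ice_add_mac_to_list matches, while an otherwise
 * identical filter for the broadcast address ff:ff:ff:ff:ff:ff does not,
 * since broadcast addresses are explicitly excluded below.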
4283 */ 4284 static bool 4285 ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info) 4286 { 4287 const u8 *addr = info->l_data.mac.mac_addr; 4288 4289 /* 4290 * Check if this info matches a multicast filter added by 4291 * ice_add_mac_to_list 4292 */ 4293 if ((info->flag == ICE_FLTR_TX) && 4294 (info->src_id == ICE_SRC_ID_VSI) && 4295 (info->lkup_type == ICE_SW_LKUP_MAC) && 4296 (info->vsi_handle == vsi->idx) && 4297 ETHER_IS_MULTICAST(addr) && !ETHER_IS_BROADCAST(addr)) 4298 return true; 4299 4300 return false; 4301 } 4302 4303 /** 4304 * @struct ice_mcast_sync_data 4305 * @brief data used by ice_sync_one_mcast_filter function 4306 * 4307 * Structure used to store data needed for processing by the 4308 * ice_sync_one_mcast_filter. This structure contains a linked list of filters 4309 * to be added, an error indication, and a pointer to the device softc. 4310 */ 4311 struct ice_mcast_sync_data { 4312 struct ice_list_head add_list; 4313 struct ice_softc *sc; 4314 int err; 4315 }; 4316 4317 /** 4318 * ice_sync_one_mcast_filter - Check if we need to program the filter 4319 * @p: void pointer to algorithm data 4320 * @sdl: link level socket address 4321 * @count: unused count value 4322 * 4323 * Called by if_foreach_llmaddr to operate on each filter in the ifp filter 4324 * list. For the given address, search our internal list to see if we have 4325 * found the filter. If not, add it to our list of filters that need to be 4326 * programmed. 4327 * 4328 * @returns (1) if we've actually setup the filter to be added 4329 */ 4330 static u_int 4331 ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, 4332 u_int __unused count) 4333 { 4334 struct ice_mcast_sync_data *data = (struct ice_mcast_sync_data *)p; 4335 struct ice_softc *sc = data->sc; 4336 struct ice_hw *hw = &sc->hw; 4337 struct ice_switch_info *sw = hw->switch_info; 4338 const u8 *sdl_addr = (const u8 *)LLADDR(sdl); 4339 struct ice_fltr_mgmt_list_entry *itr; 4340 struct ice_list_head *rules; 4341 int err; 4342 4343 rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4344 4345 /* 4346 * If a previous filter already indicated an error, there is no need 4347 * for us to finish processing the rest of the filters. 4348 */ 4349 if (data->err) 4350 return (0); 4351 4352 /* See if this filter has already been programmed */ 4353 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { 4354 struct ice_fltr_info *info = &itr->fltr_info; 4355 const u8 *addr = info->l_data.mac.mac_addr; 4356 4357 /* Only check multicast filters */ 4358 if (!ice_filter_is_mcast(&sc->pf_vsi, info)) 4359 continue; 4360 4361 /* 4362 * If this filter matches, mark the internal filter as 4363 * "found", and exit. 4364 */ 4365 if (bcmp(addr, sdl_addr, ETHER_ADDR_LEN) == 0) { 4366 itr->marker = ICE_FLTR_FOUND; 4367 return (1); 4368 } 4369 } 4370 4371 /* 4372 * If we failed to locate the filter in our internal list, we need to 4373 * place it into our add list. 
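	 * A typical example is a multicast group that was just joined via
	 * SIOCADDMULTI: it is present in the ifp list but has not yet been
	 * programmed to firmware, so it is queued here for the later
	 * ice_add_mac() call made by ice_sync_multicast_filters.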
4374 */ 4375 err = ice_add_mac_to_list(&sc->pf_vsi, &data->add_list, sdl_addr, 4376 ICE_FWD_TO_VSI); 4377 if (err) { 4378 device_printf(sc->dev, 4379 "Failed to place MAC %6D onto add list, err %s\n", 4380 sdl_addr, ":", ice_err_str(err)); 4381 data->err = err; 4382 4383 return (0); 4384 } 4385 4386 return (1); 4387 } 4388 4389 /** 4390 * ice_sync_multicast_filters - Synchronize OS and internal filter list 4391 * @sc: device private structure 4392 * 4393 * Called in response to SIOCDELMULTI to synchronize the operating system 4394 * multicast address list with the internal list of filters programmed to 4395 * firmware. 4396 * 4397 * Works in one phase to find added and deleted filters using a marker bit on 4398 * the internal list. 4399 * 4400 * First, a loop over the internal list clears the marker bit. Second, for 4401 * each filter in the ifp list is checked. If we find it in the internal list, 4402 * the marker bit is set. Otherwise, the filter is added to the add list. 4403 * Third, a loop over the internal list determines if any filters have not 4404 * been found. Each of these is added to the delete list. Finally, the add and 4405 * delete lists are programmed to firmware to update the filters. 4406 * 4407 * @returns zero on success or an integer error code on failure. 4408 */ 4409 int 4410 ice_sync_multicast_filters(struct ice_softc *sc) 4411 { 4412 struct ice_hw *hw = &sc->hw; 4413 struct ice_switch_info *sw = hw->switch_info; 4414 struct ice_fltr_mgmt_list_entry *itr; 4415 struct ice_mcast_sync_data data = {}; 4416 struct ice_list_head *rules, remove_list; 4417 enum ice_status status; 4418 int err = 0; 4419 4420 INIT_LIST_HEAD(&data.add_list); 4421 INIT_LIST_HEAD(&remove_list); 4422 data.sc = sc; 4423 data.err = 0; 4424 4425 rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4426 4427 /* Acquire the lock for the entire duration */ 4428 ice_acquire_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4429 4430 /* (1) Reset the marker state for all filters */ 4431 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) 4432 itr->marker = ICE_FLTR_NOT_FOUND; 4433 4434 /* (2) determine which filters need to be added and removed */ 4435 if_foreach_llmaddr(sc->ifp, ice_sync_one_mcast_filter, (void *)&data); 4436 if (data.err) { 4437 /* ice_sync_one_mcast_filter already prints an error */ 4438 err = data.err; 4439 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4440 goto free_filter_lists; 4441 } 4442 4443 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { 4444 struct ice_fltr_info *info = &itr->fltr_info; 4445 const u8 *addr = info->l_data.mac.mac_addr; 4446 4447 /* Only check multicast filters */ 4448 if (!ice_filter_is_mcast(&sc->pf_vsi, info)) 4449 continue; 4450 4451 /* 4452 * If the filter is not marked as found, then it must no 4453 * longer be in the ifp address list, so we need to remove it. 
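	 * For example, an address dropped from the interface via SIOCDELMULTI
	 * is no longer visited by if_foreach_llmaddr, so its filter is never
	 * marked ICE_FLTR_FOUND and ends up on the remove list built here.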
4454 */ 4455 if (itr->marker == ICE_FLTR_NOT_FOUND) { 4456 err = ice_add_mac_to_list(&sc->pf_vsi, &remove_list, 4457 addr, ICE_FWD_TO_VSI); 4458 if (err) { 4459 device_printf(sc->dev, 4460 "Failed to place MAC %6D onto remove list, err %s\n", 4461 addr, ":", ice_err_str(err)); 4462 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4463 goto free_filter_lists; 4464 } 4465 } 4466 } 4467 4468 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4469 4470 status = ice_add_mac(hw, &data.add_list); 4471 if (status) { 4472 device_printf(sc->dev, 4473 "Could not add new MAC filters, err %s aq_err %s\n", 4474 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 4475 err = (EIO); 4476 goto free_filter_lists; 4477 } 4478 4479 status = ice_remove_mac(hw, &remove_list); 4480 if (status) { 4481 device_printf(sc->dev, 4482 "Could not remove old MAC filters, err %s aq_err %s\n", 4483 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 4484 err = (EIO); 4485 goto free_filter_lists; 4486 } 4487 4488 free_filter_lists: 4489 ice_free_fltr_list(&data.add_list); 4490 ice_free_fltr_list(&remove_list); 4491 4492 return (err); 4493 } 4494 4495 /** 4496 * ice_add_vlan_hw_filter - Add a VLAN filter for a given VSI 4497 * @vsi: The VSI to add the filter for 4498 * @vid: VLAN to add 4499 * 4500 * Programs a HW filter so that the given VSI will receive the specified VLAN. 4501 */ 4502 enum ice_status 4503 ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) 4504 { 4505 struct ice_hw *hw = &vsi->sc->hw; 4506 struct ice_list_head vlan_list; 4507 struct ice_fltr_list_entry vlan_entry; 4508 4509 INIT_LIST_HEAD(&vlan_list); 4510 memset(&vlan_entry, 0, sizeof(vlan_entry)); 4511 4512 vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 4513 vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 4514 vlan_entry.fltr_info.flag = ICE_FLTR_TX; 4515 vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; 4516 vlan_entry.fltr_info.vsi_handle = vsi->idx; 4517 vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; 4518 4519 LIST_ADD(&vlan_entry.list_entry, &vlan_list); 4520 4521 return ice_add_vlan(hw, &vlan_list); 4522 } 4523 4524 /** 4525 * ice_remove_vlan_hw_filter - Remove a VLAN filter for a given VSI 4526 * @vsi: The VSI to remove the filter for 4527 * @vid: VLAN to remove 4528 * 4529 * Removes a previously programmed HW filter for the specified VSI. 4530 */ 4531 enum ice_status 4532 ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) 4533 { 4534 struct ice_hw *hw = &vsi->sc->hw; 4535 struct ice_list_head vlan_list; 4536 struct ice_fltr_list_entry vlan_entry; 4537 4538 INIT_LIST_HEAD(&vlan_list); 4539 memset(&vlan_entry, 0, sizeof(vlan_entry)); 4540 4541 vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 4542 vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 4543 vlan_entry.fltr_info.flag = ICE_FLTR_TX; 4544 vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; 4545 vlan_entry.fltr_info.vsi_handle = vsi->idx; 4546 vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; 4547 4548 LIST_ADD(&vlan_entry.list_entry, &vlan_list); 4549 4550 return ice_remove_vlan(hw, &vlan_list); 4551 } 4552 4553 #define ICE_SYSCTL_HELP_RX_ITR \ 4554 "\nControl Rx interrupt throttle rate."
\ 4555 "\n\t0-8160 - sets interrupt rate in usecs" \ 4556 "\n\t -1 - reset the Rx itr to default" 4557 4558 /** 4559 * ice_sysctl_rx_itr - Display or change the Rx ITR for a VSI 4560 * @oidp: sysctl oid structure 4561 * @arg1: pointer to private data structure 4562 * @arg2: unused 4563 * @req: sysctl request pointer 4564 * 4565 * On read: Displays the current Rx ITR value 4566 * on write: Sets the Rx ITR value, reconfiguring device if it is up 4567 */ 4568 static int 4569 ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS) 4570 { 4571 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4572 struct ice_softc *sc = vsi->sc; 4573 int increment, ret; 4574 4575 UNREFERENCED_PARAMETER(arg2); 4576 4577 if (ice_driver_is_detaching(sc)) 4578 return (ESHUTDOWN); 4579 4580 ret = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req); 4581 if ((ret) || (req->newptr == NULL)) 4582 return (ret); 4583 4584 if (vsi->rx_itr < 0) 4585 vsi->rx_itr = ICE_DFLT_RX_ITR; 4586 if (vsi->rx_itr > ICE_ITR_MAX) 4587 vsi->rx_itr = ICE_ITR_MAX; 4588 4589 /* Assume 2usec increment if it hasn't been loaded yet */ 4590 increment = sc->hw.itr_gran ? : 2; 4591 4592 /* We need to round the value to the hardware's ITR granularity */ 4593 vsi->rx_itr = (vsi->rx_itr / increment ) * increment; 4594 4595 /* If the driver has finished initializing, then we need to reprogram 4596 * the ITR registers now. Otherwise, they will be programmed during 4597 * driver initialization. 4598 */ 4599 if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) 4600 ice_configure_rx_itr(vsi); 4601 4602 return (0); 4603 } 4604 4605 #define ICE_SYSCTL_HELP_TX_ITR \ 4606 "\nControl Tx interrupt throttle rate." \ 4607 "\n\t0-8160 - sets interrupt rate in usecs" \ 4608 "\n\t -1 - reset the Tx itr to default" 4609 4610 /** 4611 * ice_sysctl_tx_itr - Display or change the Tx ITR for a VSI 4612 * @oidp: sysctl oid structure 4613 * @arg1: pointer to private data structure 4614 * @arg2: unused 4615 * @req: sysctl request pointer 4616 * 4617 * On read: Displays the current Tx ITR value 4618 * on write: Sets the Tx ITR value, reconfiguring device if it is up 4619 */ 4620 static int 4621 ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS) 4622 { 4623 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4624 struct ice_softc *sc = vsi->sc; 4625 int increment, ret; 4626 4627 UNREFERENCED_PARAMETER(arg2); 4628 4629 if (ice_driver_is_detaching(sc)) 4630 return (ESHUTDOWN); 4631 4632 ret = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req); 4633 if ((ret) || (req->newptr == NULL)) 4634 return (ret); 4635 4636 /* Allow configuring a negative value to reset to the default */ 4637 if (vsi->tx_itr < 0) 4638 vsi->tx_itr = ICE_DFLT_TX_ITR; 4639 if (vsi->tx_itr > ICE_ITR_MAX) 4640 vsi->tx_itr = ICE_ITR_MAX; 4641 4642 /* Assume 2usec increment if it hasn't been loaded yet */ 4643 increment = sc->hw.itr_gran ? : 2; 4644 4645 /* We need to round the value to the hardware's ITR granularity */ 4646 vsi->tx_itr = (vsi->tx_itr / increment ) * increment; 4647 4648 /* If the driver has finished initializing, then we need to reprogram 4649 * the ITR registers now. Otherwise, they will be programmed during 4650 * driver initialization. 
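	 * As a worked example of the rounding above: with the 2 usec
	 * granularity assumed before the hardware value is loaded, writing 33
	 * to this sysctl stores 32 (33 / 2 * 2), while writing 50 is kept
	 * unchanged.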
4651 */ 4652 if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) 4653 ice_configure_tx_itr(vsi); 4654 4655 return (0); 4656 } 4657 4658 /** 4659 * ice_add_vsi_tunables - Add tunables and nodes for a VSI 4660 * @vsi: pointer to VSI structure 4661 * @parent: parent node to add the tunables under 4662 * 4663 * Create a sysctl context for the VSI, so that sysctls for the VSI can be 4664 * dynamically removed upon VSI removal. 4665 * 4666 * Add various tunables and set up the basic node structure for the VSI. Must 4667 * be called *prior* to ice_add_vsi_sysctls. It should be called as soon as 4668 * possible after the VSI memory is initialized. 4669 * 4670 * VSI specific sysctls with CTLFLAG_TUN should be initialized here so that 4671 * their values can be read from loader.conf prior to their first use in the 4672 * driver. 4673 */ 4674 void 4675 ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent) 4676 { 4677 struct sysctl_oid_list *vsi_list; 4678 char vsi_name[32], vsi_desc[32]; 4679 4680 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4681 4682 /* Initialize the sysctl context for this VSI */ 4683 sysctl_ctx_init(&vsi->ctx); 4684 4685 /* Add a node to collect this VSI's statistics together */ 4686 snprintf(vsi_name, sizeof(vsi_name), "%u", vsi->idx); 4687 snprintf(vsi_desc, sizeof(vsi_desc), "VSI %u", vsi->idx); 4688 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->ctx, parent_list, OID_AUTO, vsi_name, 4689 CTLFLAG_RD, NULL, vsi_desc); 4690 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 4691 4692 vsi->rx_itr = ICE_DFLT_RX_ITR; 4693 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "rx_itr", 4694 CTLTYPE_S16 | CTLFLAG_RWTUN, 4695 vsi, 0, ice_sysctl_rx_itr, "S", 4696 ICE_SYSCTL_HELP_RX_ITR); 4697 4698 vsi->tx_itr = ICE_DFLT_TX_ITR; 4699 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "tx_itr", 4700 CTLTYPE_S16 | CTLFLAG_RWTUN, 4701 vsi, 0, ice_sysctl_tx_itr, "S", 4702 ICE_SYSCTL_HELP_TX_ITR); 4703 } 4704 4705 /** 4706 * ice_del_vsi_sysctl_ctx - Delete the sysctl context(s) of a VSI 4707 * @vsi: the VSI to remove contexts for 4708 * 4709 * Free the context for the VSI sysctls. This includes the main context, as 4710 * well as the per-queue sysctls. 4711 */ 4712 void 4713 ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi) 4714 { 4715 device_t dev = vsi->sc->dev; 4716 int err; 4717 4718 if (vsi->vsi_node) { 4719 err = sysctl_ctx_free(&vsi->ctx); 4720 if (err) 4721 device_printf(dev, "failed to free VSI %d sysctl context, err %s\n", 4722 vsi->idx, ice_err_str(err)); 4723 vsi->vsi_node = NULL; 4724 } 4725 } 4726 4727 /** 4728 * ice_add_device_tunables - Add early tunable sysctls and sysctl nodes 4729 * @sc: device private structure 4730 * 4731 * Add per-device dynamic tunable sysctls, and setup the general sysctl trees 4732 * for re-use by ice_add_device_sysctls. 4733 * 4734 * In order for the sysctl fields to be initialized before use, this function 4735 * should be called as early as possible during attach activities. 4736 * 4737 * Any non-global sysctl marked as CTLFLAG_TUN should likely be initialized 4738 * here in this function, rather than later in ice_add_device_sysctls. 4739 * 4740 * To make things easier, this function is also expected to setup the various 4741 * sysctl nodes in addition to tunables so that other sysctls which can't be 4742 * initialized early can hook into the same nodes.
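 *
 * As an illustrative example (the unit number is hypothetical), the
 * CTLFLAG_RDTUN sysctl added below can be seeded from loader.conf(5) before
 * the driver attaches:
 *
 *	dev.ice.0.enable_health_events="0"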
4743 */ 4744 void 4745 ice_add_device_tunables(struct ice_softc *sc) 4746 { 4747 device_t dev = sc->dev; 4748 4749 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 4750 struct sysctl_oid_list *ctx_list = 4751 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 4752 4753 sc->enable_health_events = ice_enable_health_events; 4754 4755 SYSCTL_ADD_BOOL(ctx, ctx_list, OID_AUTO, "enable_health_events", 4756 CTLFLAG_RDTUN, &sc->enable_health_events, 0, 4757 "Enable FW health event reporting for this PF"); 4758 4759 /* Add a node to track VSI sysctls. Keep track of the node in the 4760 * softc so that we can hook other sysctls into it later. This 4761 * includes both the VSI statistics, as well as potentially dynamic 4762 * VSIs in the future. 4763 */ 4764 4765 sc->vsi_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "vsi", 4766 CTLFLAG_RD, NULL, "VSI Configuration and Statistics"); 4767 4768 /* Add debug tunables */ 4769 ice_add_debug_tunables(sc); 4770 } 4771 4772 /** 4773 * ice_sysctl_dump_mac_filters - Dump a list of all HW MAC Filters 4774 * @oidp: sysctl oid structure 4775 * @arg1: pointer to private data structure 4776 * @arg2: unused 4777 * @req: sysctl request pointer 4778 * 4779 * Callback for "mac_filters" sysctl to dump the programmed MAC filters. 4780 */ 4781 static int 4782 ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS) 4783 { 4784 struct ice_softc *sc = (struct ice_softc *)arg1; 4785 struct ice_hw *hw = &sc->hw; 4786 struct ice_switch_info *sw = hw->switch_info; 4787 struct ice_fltr_mgmt_list_entry *fm_entry; 4788 struct ice_list_head *rule_head; 4789 struct ice_lock *rule_lock; 4790 struct ice_fltr_info *fi; 4791 struct sbuf *sbuf; 4792 int ret; 4793 4794 UNREFERENCED_PARAMETER(oidp); 4795 UNREFERENCED_PARAMETER(arg2); 4796 4797 if (ice_driver_is_detaching(sc)) 4798 return (ESHUTDOWN); 4799 4800 /* Wire the old buffer so we can take a non-sleepable lock */ 4801 ret = sysctl_wire_old_buffer(req, 0); 4802 if (ret) 4803 return (ret); 4804 4805 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4806 4807 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 4808 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4809 4810 sbuf_printf(sbuf, "MAC Filter List"); 4811 4812 ice_acquire_lock(rule_lock); 4813 4814 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 4815 fi = &fm_entry->fltr_info; 4816 4817 sbuf_printf(sbuf, 4818 "\nmac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %d", 4819 fi->l_data.mac.mac_addr, ":", fi->vsi_handle, 4820 ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, 4821 ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); 4822 4823 /* if we have a vsi_list_info, print some information about that */ 4824 if (fm_entry->vsi_list_info) { 4825 sbuf_printf(sbuf, 4826 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 4827 fm_entry->vsi_count, 4828 fm_entry->vsi_list_info->vsi_list_id, 4829 fm_entry->vsi_list_info->ref_cnt); 4830 } 4831 } 4832 4833 ice_release_lock(rule_lock); 4834 4835 sbuf_finish(sbuf); 4836 sbuf_delete(sbuf); 4837 4838 return (0); 4839 } 4840 4841 /** 4842 * ice_sysctl_dump_vlan_filters - Dump a list of all HW VLAN Filters 4843 * @oidp: sysctl oid structure 4844 * @arg1: pointer to private data structure 4845 * @arg2: unused 4846 * @req: sysctl request pointer 4847 * 4848 * Callback for "vlan_filters" sysctl to dump the programmed VLAN filters. 
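 *
 * Example usage (the unit number is hypothetical), reading the "switch" node
 * created by ice_add_debug_sysctls():
 *
 *	# sysctl dev.ice.0.debug.switch.vlan_filters
 *
 * Each programmed rule is printed on its own line with the VLAN ID, VSI
 * handle, forwarding action, and rule ID fields formatted by the handler
 * below.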
4849 */ 4850 static int 4851 ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS) 4852 { 4853 struct ice_softc *sc = (struct ice_softc *)arg1; 4854 struct ice_hw *hw = &sc->hw; 4855 struct ice_switch_info *sw = hw->switch_info; 4856 struct ice_fltr_mgmt_list_entry *fm_entry; 4857 struct ice_list_head *rule_head; 4858 struct ice_lock *rule_lock; 4859 struct ice_fltr_info *fi; 4860 struct sbuf *sbuf; 4861 int ret; 4862 4863 UNREFERENCED_PARAMETER(oidp); 4864 UNREFERENCED_PARAMETER(arg2); 4865 4866 if (ice_driver_is_detaching(sc)) 4867 return (ESHUTDOWN); 4868 4869 /* Wire the old buffer so we can take a non-sleepable lock */ 4870 ret = sysctl_wire_old_buffer(req, 0); 4871 if (ret) 4872 return (ret); 4873 4874 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4875 4876 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; 4877 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; 4878 4879 sbuf_printf(sbuf, "VLAN Filter List"); 4880 4881 ice_acquire_lock(rule_lock); 4882 4883 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 4884 fi = &fm_entry->fltr_info; 4885 4886 sbuf_printf(sbuf, 4887 "\nvlan_id = %4d, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 4888 fi->l_data.vlan.vlan_id, fi->vsi_handle, 4889 ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, 4890 ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); 4891 4892 /* if we have a vsi_list_info, print some information about that */ 4893 if (fm_entry->vsi_list_info) { 4894 sbuf_printf(sbuf, 4895 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 4896 fm_entry->vsi_count, 4897 fm_entry->vsi_list_info->vsi_list_id, 4898 fm_entry->vsi_list_info->ref_cnt); 4899 } 4900 } 4901 4902 ice_release_lock(rule_lock); 4903 4904 sbuf_finish(sbuf); 4905 sbuf_delete(sbuf); 4906 4907 return (0); 4908 } 4909 4910 /** 4911 * ice_sysctl_dump_ethertype_filters - Dump a list of all HW Ethertype filters 4912 * @oidp: sysctl oid structure 4913 * @arg1: pointer to private data structure 4914 * @arg2: unused 4915 * @req: sysctl request pointer 4916 * 4917 * Callback for "ethertype_filters" sysctl to dump the programmed Ethertype 4918 * filters. 
4919 */ 4920 static int 4921 ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS) 4922 { 4923 struct ice_softc *sc = (struct ice_softc *)arg1; 4924 struct ice_hw *hw = &sc->hw; 4925 struct ice_switch_info *sw = hw->switch_info; 4926 struct ice_fltr_mgmt_list_entry *fm_entry; 4927 struct ice_list_head *rule_head; 4928 struct ice_lock *rule_lock; 4929 struct ice_fltr_info *fi; 4930 struct sbuf *sbuf; 4931 int ret; 4932 4933 UNREFERENCED_PARAMETER(oidp); 4934 UNREFERENCED_PARAMETER(arg2); 4935 4936 if (ice_driver_is_detaching(sc)) 4937 return (ESHUTDOWN); 4938 4939 /* Wire the old buffer so we can take a non-sleepable lock */ 4940 ret = sysctl_wire_old_buffer(req, 0); 4941 if (ret) 4942 return (ret); 4943 4944 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4945 4946 rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rule_lock; 4947 rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rules; 4948 4949 sbuf_printf(sbuf, "Ethertype Filter List"); 4950 4951 ice_acquire_lock(rule_lock); 4952 4953 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 4954 fi = &fm_entry->fltr_info; 4955 4956 sbuf_printf(sbuf, 4957 "\nethertype = 0x%04x, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 4958 fi->l_data.ethertype_mac.ethertype, 4959 fi->vsi_handle, ice_fltr_flag_str(fi->flag), 4960 fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), 4961 fi->fltr_rule_id); 4962 4963 /* if we have a vsi_list_info, print some information about that */ 4964 if (fm_entry->vsi_list_info) { 4965 sbuf_printf(sbuf, 4966 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 4967 fm_entry->vsi_count, 4968 fm_entry->vsi_list_info->vsi_list_id, 4969 fm_entry->vsi_list_info->ref_cnt); 4970 } 4971 } 4972 4973 ice_release_lock(rule_lock); 4974 4975 sbuf_finish(sbuf); 4976 sbuf_delete(sbuf); 4977 4978 return (0); 4979 } 4980 4981 /** 4982 * ice_sysctl_dump_ethertype_mac_filters - Dump a list of all HW Ethertype/MAC filters 4983 * @oidp: sysctl oid structure 4984 * @arg1: pointer to private data structure 4985 * @arg2: unused 4986 * @req: sysctl request pointer 4987 * 4988 * Callback for "ethertype_mac_filters" sysctl to dump the programmed 4989 * Ethertype/MAC filters. 
4990 */ 4991 static int 4992 ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS) 4993 { 4994 struct ice_softc *sc = (struct ice_softc *)arg1; 4995 struct ice_hw *hw = &sc->hw; 4996 struct ice_switch_info *sw = hw->switch_info; 4997 struct ice_fltr_mgmt_list_entry *fm_entry; 4998 struct ice_list_head *rule_head; 4999 struct ice_lock *rule_lock; 5000 struct ice_fltr_info *fi; 5001 struct sbuf *sbuf; 5002 int ret; 5003 5004 UNREFERENCED_PARAMETER(oidp); 5005 UNREFERENCED_PARAMETER(arg2); 5006 5007 if (ice_driver_is_detaching(sc)) 5008 return (ESHUTDOWN); 5009 5010 /* Wire the old buffer so we can take a non-sleepable lock */ 5011 ret = sysctl_wire_old_buffer(req, 0); 5012 if (ret) 5013 return (ret); 5014 5015 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5016 5017 rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rule_lock; 5018 rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rules; 5019 5020 sbuf_printf(sbuf, "Ethertype/MAC Filter List"); 5021 5022 ice_acquire_lock(rule_lock); 5023 5024 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 5025 fi = &fm_entry->fltr_info; 5026 5027 sbuf_printf(sbuf, 5028 "\nethertype = 0x%04x, mac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 5029 fi->l_data.ethertype_mac.ethertype, 5030 fi->l_data.ethertype_mac.mac_addr, ":", 5031 fi->vsi_handle, ice_fltr_flag_str(fi->flag), 5032 fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), 5033 fi->fltr_rule_id); 5034 5035 /* if we have a vsi_list_info, print some information about that */ 5036 if (fm_entry->vsi_list_info) { 5037 sbuf_printf(sbuf, 5038 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 5039 fm_entry->vsi_count, 5040 fm_entry->vsi_list_info->vsi_list_id, 5041 fm_entry->vsi_list_info->ref_cnt); 5042 } 5043 } 5044 5045 ice_release_lock(rule_lock); 5046 5047 sbuf_finish(sbuf); 5048 sbuf_delete(sbuf); 5049 5050 return (0); 5051 } 5052 5053 /** 5054 * ice_sysctl_dump_state_flags - Dump device driver state flags 5055 * @oidp: sysctl oid structure 5056 * @arg1: pointer to private data structure 5057 * @arg2: unused 5058 * @req: sysctl request pointer 5059 * 5060 * Callback for "state" sysctl to display currently set driver state flags. 
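 *
 * Example usage (the unit number is hypothetical):
 *
 *	# sysctl dev.ice.0.debug.state
 *
 * Each set state bit is printed on its own line using ice_state_to_str(),
 * unnamed bits are shown as BIT(n), and "Nothing set" is printed when no
 * flags are set.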
5061 */ 5062 static int 5063 ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS) 5064 { 5065 struct ice_softc *sc = (struct ice_softc *)arg1; 5066 struct sbuf *sbuf; 5067 u32 copied_state; 5068 unsigned int i; 5069 bool at_least_one = false; 5070 5071 UNREFERENCED_PARAMETER(oidp); 5072 UNREFERENCED_PARAMETER(arg2); 5073 5074 if (ice_driver_is_detaching(sc)) 5075 return (ESHUTDOWN); 5076 5077 /* Make a copy of the state to ensure we display coherent values */ 5078 copied_state = atomic_load_acq_32(&sc->state); 5079 5080 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5081 5082 /* Add the string for each set state to the sbuf */ 5083 for (i = 0; i < 32; i++) { 5084 if (copied_state & BIT(i)) { 5085 const char *str = ice_state_to_str((enum ice_state)i); 5086 5087 at_least_one = true; 5088 5089 if (str) 5090 sbuf_printf(sbuf, "\n%s", str); 5091 else 5092 sbuf_printf(sbuf, "\nBIT(%u)", i); 5093 } 5094 } 5095 5096 if (!at_least_one) 5097 sbuf_printf(sbuf, "Nothing set"); 5098 5099 sbuf_finish(sbuf); 5100 sbuf_delete(sbuf); 5101 5102 return (0); 5103 } 5104 5105 /** 5106 * ice_add_debug_tunables - Add tunables helpful for debugging the device driver 5107 * @sc: device private structure 5108 * 5109 * Add sysctl tunable values related to debugging the device driver. For now, 5110 * this means a tunable to set the debug mask early during driver load. 5111 * 5112 * The debug node will be marked CTLFLAG_SKIP unless INVARIANTS is defined, so 5113 * that in normal kernel builds, these will all be hidden, but on a debug 5114 * kernel they will be more easily visible. 5115 */ 5116 static void 5117 ice_add_debug_tunables(struct ice_softc *sc) 5118 { 5119 struct sysctl_oid_list *debug_list; 5120 device_t dev = sc->dev; 5121 5122 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 5123 struct sysctl_oid_list *ctx_list = 5124 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 5125 5126 sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", 5127 ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 5128 NULL, "Debug Sysctls"); 5129 debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); 5130 5131 SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "debug_mask", 5132 CTLFLAG_RW | CTLFLAG_TUN, &sc->hw.debug_mask, 0, 5133 "Debug message enable/disable mask"); 5134 5135 /* Load the default value from the global sysctl first */ 5136 sc->enable_tx_fc_filter = ice_enable_tx_fc_filter; 5137 5138 SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_fc_filter", 5139 CTLFLAG_RDTUN, &sc->enable_tx_fc_filter, 0, 5140 "Drop Ethertype 0x8808 control frames originating from software on this PF"); 5141 5142 /* Load the default value from the global sysctl first */ 5143 sc->enable_tx_lldp_filter = ice_enable_tx_lldp_filter; 5144 5145 SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_lldp_filter", 5146 CTLFLAG_RDTUN, &sc->enable_tx_lldp_filter, 0, 5147 "Drop Ethertype 0x88cc LLDP frames originating from software on this PF"); 5148 5149 } 5150 5151 #define ICE_SYSCTL_HELP_REQUEST_RESET \ 5152 "\nRequest the driver to initiate a reset." 
\ 5153 "\n\tpfr - Initiate a PF reset" \ 5154 "\n\tcorer - Initiate a CORE reset" \ 5155 "\n\tglobr - Initiate a GLOBAL reset" 5156 5157 /** 5158 * @var rl_sysctl_ticks 5159 * @brief timestamp for latest reset request sysctl call 5160 * 5161 * Helps rate-limit the call to the sysctl which resets the device 5162 */ 5163 int rl_sysctl_ticks = 0; 5164 5165 /** 5166 * ice_sysctl_request_reset - Request that the driver initiate a reset 5167 * @oidp: sysctl oid structure 5168 * @arg1: pointer to private data structure 5169 * @arg2: unused 5170 * @req: sysctl request pointer 5171 * 5172 * Callback for "request_reset" sysctl to request that the driver initiate 5173 * a reset. Expects to be passed one of the following strings 5174 * 5175 * "pfr" - Initiate a PF reset 5176 * "corer" - Initiate a CORE reset 5177 * "globr" - Initiate a Global reset 5178 */ 5179 static int 5180 ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS) 5181 { 5182 struct ice_softc *sc = (struct ice_softc *)arg1; 5183 struct ice_hw *hw = &sc->hw; 5184 enum ice_status status; 5185 enum ice_reset_req reset_type = ICE_RESET_INVAL; 5186 const char *reset_message; 5187 int ret; 5188 5189 /* Buffer to store the requested reset string. Must contain enough 5190 * space to store the largest expected reset string, which currently 5191 * means 6 bytes of space. 5192 */ 5193 char reset[6] = ""; 5194 5195 UNREFERENCED_PARAMETER(arg2); 5196 5197 ret = priv_check(curthread, PRIV_DRIVER); 5198 if (ret) 5199 return (ret); 5200 5201 if (ice_driver_is_detaching(sc)) 5202 return (ESHUTDOWN); 5203 5204 /* Read in the requested reset type. */ 5205 ret = sysctl_handle_string(oidp, reset, sizeof(reset), req); 5206 if ((ret) || (req->newptr == NULL)) 5207 return (ret); 5208 5209 if (strcmp(reset, "pfr") == 0) { 5210 reset_message = "Requesting a PF reset"; 5211 reset_type = ICE_RESET_PFR; 5212 } else if (strcmp(reset, "corer") == 0) { 5213 reset_message = "Initiating a CORE reset"; 5214 reset_type = ICE_RESET_CORER; 5215 } else if (strcmp(reset, "globr") == 0) { 5216 reset_message = "Initiating a GLOBAL reset"; 5217 reset_type = ICE_RESET_GLOBR; 5218 } else if (strcmp(reset, "empr") == 0) { 5219 device_printf(sc->dev, "Triggering an EMP reset via software is not currently supported\n"); 5220 return (EOPNOTSUPP); 5221 } 5222 5223 if (reset_type == ICE_RESET_INVAL) { 5224 device_printf(sc->dev, "%s is not a valid reset request\n", reset); 5225 return (EINVAL); 5226 } 5227 5228 /* 5229 * Rate-limit the frequency at which this function is called. 5230 * Assuming this is called successfully once, typically, 5231 * everything should be handled within the allotted time frame. 5232 * However, in the odd setup situations, we've also put in 5233 * guards for when the reset has finished, but we're in the 5234 * process of rebuilding. And instead of queueing an intent, 5235 * simply error out and let the caller retry, if so desired. 5236 */ 5237 if (TICKS_2_MSEC(ticks - rl_sysctl_ticks) < 500) { 5238 device_printf(sc->dev, 5239 "Call frequency too high. Operation aborted.\n"); 5240 return (EBUSY); 5241 } 5242 rl_sysctl_ticks = ticks; 5243 5244 if (TICKS_2_MSEC(ticks - sc->rebuild_ticks) < 100) { 5245 device_printf(sc->dev, "Device rebuilding. Operation aborted.\n"); 5246 return (EBUSY); 5247 } 5248 5249 if (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) { 5250 device_printf(sc->dev, "Device in reset. 
Operation aborted.\n"); 5251 return (EBUSY); 5252 } 5253 5254 device_printf(sc->dev, "%s\n", reset_message); 5255 5256 /* Initiate the PF reset during the admin status task */ 5257 if (reset_type == ICE_RESET_PFR) { 5258 ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ); 5259 return (0); 5260 } 5261 5262 /* 5263 * Other types of resets including CORE and GLOBAL resets trigger an 5264 * interrupt on all PFs. Initiate the reset now. Preparation and 5265 * rebuild logic will be handled by the admin status task. 5266 */ 5267 status = ice_reset(hw, reset_type); 5268 5269 /* 5270 * Resets can take a long time and we still don't want another call 5271 * to this function before we settle down. 5272 */ 5273 rl_sysctl_ticks = ticks; 5274 5275 if (status) { 5276 device_printf(sc->dev, "failed to initiate device reset, err %s\n", 5277 ice_status_str(status)); 5278 ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); 5279 return (EFAULT); 5280 } 5281 5282 return (0); 5283 } 5284 5285 /** 5286 * ice_add_debug_sysctls - Add sysctls helpful for debugging the device driver 5287 * @sc: device private structure 5288 * 5289 * Add sysctls related to debugging the device driver. Generally these should 5290 * simply be sysctls which dump internal driver state, to aid in understanding 5291 * what the driver is doing. 5292 */ 5293 static void 5294 ice_add_debug_sysctls(struct ice_softc *sc) 5295 { 5296 struct sysctl_oid *sw_node; 5297 struct sysctl_oid_list *debug_list, *sw_list; 5298 device_t dev = sc->dev; 5299 5300 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 5301 5302 debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); 5303 5304 SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "request_reset", 5305 CTLTYPE_STRING | CTLFLAG_WR, sc, 0, 5306 ice_sysctl_request_reset, "A", 5307 ICE_SYSCTL_HELP_REQUEST_RESET); 5308 5309 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "pfr_count", CTLFLAG_RD, 5310 &sc->soft_stats.pfr_count, 0, "# of PF resets handled"); 5311 5312 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "corer_count", CTLFLAG_RD, 5313 &sc->soft_stats.corer_count, 0, "# of CORE resets handled"); 5314 5315 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "globr_count", CTLFLAG_RD, 5316 &sc->soft_stats.globr_count, 0, "# of Global resets handled"); 5317 5318 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "empr_count", CTLFLAG_RD, 5319 &sc->soft_stats.empr_count, 0, "# of EMP resets handled"); 5320 5321 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "tx_mdd_count", CTLFLAG_RD, 5322 &sc->soft_stats.tx_mdd_count, 0, "# of Tx MDD events detected"); 5323 5324 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "rx_mdd_count", CTLFLAG_RD, 5325 &sc->soft_stats.rx_mdd_count, 0, "# of Rx MDD events detected"); 5326 5327 SYSCTL_ADD_PROC(ctx, debug_list, 5328 OID_AUTO, "state", CTLTYPE_STRING | CTLFLAG_RD, 5329 sc, 0, ice_sysctl_dump_state_flags, "A", "Driver State Flags"); 5330 5331 SYSCTL_ADD_PROC(ctx, debug_list, 5332 OID_AUTO, "phy_type_low", CTLTYPE_U64 | CTLFLAG_RW, 5333 sc, 0, ice_sysctl_phy_type_low, "QU", 5334 "PHY type Low from Get PHY Caps/Set PHY Cfg"); 5335 5336 SYSCTL_ADD_PROC(ctx, debug_list, 5337 OID_AUTO, "phy_type_high", CTLTYPE_U64 | CTLFLAG_RW, 5338 sc, 0, ice_sysctl_phy_type_high, "QU", 5339 "PHY type High from Get PHY Caps/Set PHY Cfg"); 5340 5341 SYSCTL_ADD_PROC(ctx, debug_list, 5342 OID_AUTO, "phy_sw_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5343 sc, 0, ice_sysctl_phy_sw_caps, "", 5344 "Get PHY Capabilities (Software configuration)"); 5345 5346 SYSCTL_ADD_PROC(ctx, debug_list, 5347 OID_AUTO, "phy_nvm_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5348 
sc, 0, ice_sysctl_phy_nvm_caps, "", 5349 "Get PHY Capabilities (NVM configuration)"); 5350 5351 SYSCTL_ADD_PROC(ctx, debug_list, 5352 OID_AUTO, "phy_topo_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5353 sc, 0, ice_sysctl_phy_topo_caps, "", 5354 "Get PHY Capabilities (Topology configuration)"); 5355 5356 SYSCTL_ADD_PROC(ctx, debug_list, 5357 OID_AUTO, "phy_link_status", CTLTYPE_STRUCT | CTLFLAG_RD, 5358 sc, 0, ice_sysctl_phy_link_status, "", 5359 "Get PHY Link Status"); 5360 5361 SYSCTL_ADD_PROC(ctx, debug_list, 5362 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD, 5363 sc, 0, ice_sysctl_read_i2c_diag_data, "A", 5364 "Dump selected diagnostic data from FW"); 5365 5366 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "fw_build", CTLFLAG_RD, 5367 &sc->hw.fw_build, 0, "FW Build ID"); 5368 5369 SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "os_ddp_version", CTLTYPE_STRING | CTLFLAG_RD, 5370 sc, 0, ice_sysctl_os_pkg_version, "A", 5371 "DDP package name and version found in ice_ddp"); 5372 5373 SYSCTL_ADD_PROC(ctx, debug_list, 5374 OID_AUTO, "cur_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, 5375 sc, 0, ice_sysctl_fw_cur_lldp_persist_status, "A", "Current LLDP persistent status"); 5376 5377 SYSCTL_ADD_PROC(ctx, debug_list, 5378 OID_AUTO, "dflt_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, 5379 sc, 0, ice_sysctl_fw_dflt_lldp_persist_status, "A", "Default LLDP persistent status"); 5380 5381 SYSCTL_ADD_PROC(ctx, debug_list, 5382 OID_AUTO, "negotiated_fc", CTLTYPE_STRING | CTLFLAG_RD, 5383 sc, 0, ice_sysctl_negotiated_fc, "A", "Current Negotiated Flow Control mode"); 5384 5385 sw_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "switch", 5386 CTLFLAG_RD, NULL, "Switch Configuration"); 5387 sw_list = SYSCTL_CHILDREN(sw_node); 5388 5389 SYSCTL_ADD_PROC(ctx, sw_list, 5390 OID_AUTO, "mac_filters", CTLTYPE_STRING | CTLFLAG_RD, 5391 sc, 0, ice_sysctl_dump_mac_filters, "A", "MAC Filters"); 5392 5393 SYSCTL_ADD_PROC(ctx, sw_list, 5394 OID_AUTO, "vlan_filters", CTLTYPE_STRING | CTLFLAG_RD, 5395 sc, 0, ice_sysctl_dump_vlan_filters, "A", "VLAN Filters"); 5396 5397 SYSCTL_ADD_PROC(ctx, sw_list, 5398 OID_AUTO, "ethertype_filters", CTLTYPE_STRING | CTLFLAG_RD, 5399 sc, 0, ice_sysctl_dump_ethertype_filters, "A", "Ethertype Filters"); 5400 5401 SYSCTL_ADD_PROC(ctx, sw_list, 5402 OID_AUTO, "ethertype_mac_filters", CTLTYPE_STRING | CTLFLAG_RD, 5403 sc, 0, ice_sysctl_dump_ethertype_mac_filters, "A", "Ethertype/MAC Filters"); 5404 5405 } 5406 5407 /** 5408 * ice_vsi_disable_tx - Disable (unconfigure) Tx queues for a VSI 5409 * @vsi: the VSI to disable 5410 * 5411 * Disables the Tx queues associated with this VSI. Essentially the opposite 5412 * of ice_cfg_vsi_for_tx. 
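 *
 * Roughly, the flow below is: collect the queue TEIDs, hardware queue IDs
 * and per-VSI queue handles for every Tx queue, then hand them to
 * ice_dis_vsi_txq() in a single call. "Queue does not exist" and "reset in
 * progress" results from firmware are not treated as failures.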
5413 */ 5414 int 5415 ice_vsi_disable_tx(struct ice_vsi *vsi) 5416 { 5417 struct ice_softc *sc = vsi->sc; 5418 struct ice_hw *hw = &sc->hw; 5419 enum ice_status status; 5420 u32 *q_teids; 5421 u16 *q_ids, *q_handles; 5422 int i, err = 0; 5423 5424 if (vsi->num_tx_queues > 255) 5425 return (ENOSYS); 5426 5427 q_teids = (u32 *)malloc(sizeof(*q_teids) * vsi->num_tx_queues, 5428 M_ICE, M_NOWAIT|M_ZERO); 5429 if (!q_teids) 5430 return (ENOMEM); 5431 5432 q_ids = (u16 *)malloc(sizeof(*q_ids) * vsi->num_tx_queues, 5433 M_ICE, M_NOWAIT|M_ZERO); 5434 if (!q_ids) { 5435 err = (ENOMEM); 5436 goto free_q_teids; 5437 } 5438 5439 q_handles = (u16 *)malloc(sizeof(*q_handles) * vsi->num_tx_queues, 5440 M_ICE, M_NOWAIT|M_ZERO); 5441 if (!q_handles) { 5442 err = (ENOMEM); 5443 goto free_q_ids; 5444 } 5445 5446 5447 for (i = 0; i < vsi->num_tx_queues; i++) { 5448 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 5449 5450 q_ids[i] = vsi->tx_qmap[i]; 5451 q_handles[i] = i; 5452 q_teids[i] = txq->q_teid; 5453 } 5454 5455 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, vsi->num_tx_queues, 5456 q_handles, q_ids, q_teids, ICE_NO_RESET, 0, NULL); 5457 if (status == ICE_ERR_DOES_NOT_EXIST) { 5458 ; /* Queues have already been disabled, no need to report this as an error */ 5459 } else if (status == ICE_ERR_RESET_ONGOING) { 5460 device_printf(sc->dev, 5461 "Reset in progress. LAN Tx queues already disabled\n"); 5462 } else if (status) { 5463 device_printf(sc->dev, 5464 "Failed to disable LAN Tx queues: err %s aq_err %s\n", 5465 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5466 err = (ENODEV); 5467 } 5468 5469 /* free_q_handles: */ 5470 free(q_handles, M_ICE); 5471 free_q_ids: 5472 free(q_ids, M_ICE); 5473 free_q_teids: 5474 free(q_teids, M_ICE); 5475 5476 return err; 5477 } 5478 5479 /** 5480 * ice_vsi_set_rss_params - Set the RSS parameters for the VSI 5481 * @vsi: the VSI to configure 5482 * 5483 * Sets the RSS table size and lookup table type for the VSI based on its 5484 * VSI type. 5485 */ 5486 static void 5487 ice_vsi_set_rss_params(struct ice_vsi *vsi) 5488 { 5489 struct ice_softc *sc = vsi->sc; 5490 struct ice_hw_common_caps *cap; 5491 5492 cap = &sc->hw.func_caps.common_cap; 5493 5494 switch (vsi->type) { 5495 case ICE_VSI_PF: 5496 /* The PF VSI inherits RSS instance of the PF */ 5497 vsi->rss_table_size = cap->rss_table_size; 5498 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 5499 break; 5500 case ICE_VSI_VF: 5501 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; 5502 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; 5503 break; 5504 default: 5505 device_printf(sc->dev, 5506 "VSI %d: RSS not supported for VSI type %d\n", 5507 vsi->idx, vsi->type); 5508 break; 5509 } 5510 } 5511 5512 /** 5513 * ice_vsi_add_txqs_ctx - Create a sysctl context and node to store txq sysctls 5514 * @vsi: The VSI to add the context for 5515 * 5516 * Creates a sysctl context for storing txq sysctls. Additionally creates 5517 * a node rooted at the given VSI's main sysctl node. This context will be 5518 * used to store per-txq sysctls which may need to be released during the 5519 * driver's lifetime. 
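 *
 * The matching teardown is ice_vsi_del_txqs_ctx(), which must run before the
 * Tx queue memory itself is freed so that no sysctl handler is left pointing
 * at stale memory.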
5520 */ 5521 void 5522 ice_vsi_add_txqs_ctx(struct ice_vsi *vsi) 5523 { 5524 struct sysctl_oid_list *vsi_list; 5525 5526 sysctl_ctx_init(&vsi->txqs_ctx); 5527 5528 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 5529 5530 vsi->txqs_node = SYSCTL_ADD_NODE(&vsi->txqs_ctx, vsi_list, OID_AUTO, "txqs", 5531 CTLFLAG_RD, NULL, "Tx Queues"); 5532 } 5533 5534 /** 5535 * ice_vsi_add_rxqs_ctx - Create a sysctl context and node to store rxq sysctls 5536 * @vsi: The VSI to add the context for 5537 * 5538 * Creates a sysctl context for storing rxq sysctls. Additionally creates 5539 * a node rooted at the given VSI's main sysctl node. This context will be 5540 * used to store per-rxq sysctls which may need to be released during the 5541 * driver's lifetime. 5542 */ 5543 void 5544 ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi) 5545 { 5546 struct sysctl_oid_list *vsi_list; 5547 5548 sysctl_ctx_init(&vsi->rxqs_ctx); 5549 5550 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 5551 5552 vsi->rxqs_node = SYSCTL_ADD_NODE(&vsi->rxqs_ctx, vsi_list, OID_AUTO, "rxqs", 5553 CTLFLAG_RD, NULL, "Rx Queues"); 5554 } 5555 5556 /** 5557 * ice_vsi_del_txqs_ctx - Delete the Tx queue sysctl context for this VSI 5558 * @vsi: The VSI to delete from 5559 * 5560 * Frees the txq sysctl context created for storing the per-queue Tx sysctls. 5561 * Must be called prior to freeing the Tx queue memory, in order to avoid 5562 * having sysctls point at stale memory. 5563 */ 5564 void 5565 ice_vsi_del_txqs_ctx(struct ice_vsi *vsi) 5566 { 5567 device_t dev = vsi->sc->dev; 5568 int err; 5569 5570 if (vsi->txqs_node) { 5571 err = sysctl_ctx_free(&vsi->txqs_ctx); 5572 if (err) 5573 device_printf(dev, "failed to free VSI %d txqs_ctx, err %s\n", 5574 vsi->idx, ice_err_str(err)); 5575 vsi->txqs_node = NULL; 5576 } 5577 } 5578 5579 /** 5580 * ice_vsi_del_rxqs_ctx - Delete the Rx queue sysctl context for this VSI 5581 * @vsi: The VSI to delete from 5582 * 5583 * Frees the rxq sysctl context created for storing the per-queue Rx sysctls. 5584 * Must be called prior to freeing the Rx queue memory, in order to avoid 5585 * having sysctls point at stale memory. 5586 */ 5587 void 5588 ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi) 5589 { 5590 device_t dev = vsi->sc->dev; 5591 int err; 5592 5593 if (vsi->rxqs_node) { 5594 err = sysctl_ctx_free(&vsi->rxqs_ctx); 5595 if (err) 5596 device_printf(dev, "failed to free VSI %d rxqs_ctx, err %s\n", 5597 vsi->idx, ice_err_str(err)); 5598 vsi->rxqs_node = NULL; 5599 } 5600 } 5601 5602 /** 5603 * ice_add_txq_sysctls - Add per-queue sysctls for a Tx queue 5604 * @txq: pointer to the Tx queue 5605 * 5606 * Add per-queue sysctls for a given Tx queue. Can't be called during 5607 * ice_add_vsi_sysctls, since the queue memory has not yet been setup. 
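 *
 * Each queue gets its own node, named after txq->me, under the VSI's "txqs"
 * node, so the counters end up at OIDs of (roughly) the form
 * <vsi sysctl node>.txqs.<queue>.tx_packets; the exact prefix depends on
 * where the VSI node was rooted.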
5608 */ 5609 void 5610 ice_add_txq_sysctls(struct ice_tx_queue *txq) 5611 { 5612 struct ice_vsi *vsi = txq->vsi; 5613 struct sysctl_ctx_list *ctx = &vsi->txqs_ctx; 5614 struct sysctl_oid_list *txqs_list, *this_txq_list; 5615 struct sysctl_oid *txq_node; 5616 char txq_name[32], txq_desc[32]; 5617 5618 const struct ice_sysctl_info ctls[] = { 5619 { &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" }, 5620 { &txq->stats.tx_bytes, "tx_bytes", "Queue Bytes Transmitted" }, 5621 { &txq->stats.mss_too_small, "mss_too_small", "TSO sends with an MSS less than 64" }, 5622 { 0, 0, 0 } 5623 }; 5624 5625 const struct ice_sysctl_info *entry = ctls; 5626 5627 txqs_list = SYSCTL_CHILDREN(vsi->txqs_node); 5628 5629 snprintf(txq_name, sizeof(txq_name), "%u", txq->me); 5630 snprintf(txq_desc, sizeof(txq_desc), "Tx Queue %u", txq->me); 5631 txq_node = SYSCTL_ADD_NODE(ctx, txqs_list, OID_AUTO, txq_name, 5632 CTLFLAG_RD, NULL, txq_desc); 5633 this_txq_list = SYSCTL_CHILDREN(txq_node); 5634 5635 /* Add the Tx queue statistics */ 5636 while (entry->stat != 0) { 5637 SYSCTL_ADD_U64(ctx, this_txq_list, OID_AUTO, entry->name, 5638 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 5639 entry->description); 5640 entry++; 5641 } 5642 } 5643 5644 /** 5645 * ice_add_rxq_sysctls - Add per-queue sysctls for an Rx queue 5646 * @rxq: pointer to the Rx queue 5647 * 5648 * Add per-queue sysctls for a given Rx queue. Can't be called during 5649 * ice_add_vsi_sysctls, since the queue memory has not yet been setup. 5650 */ 5651 void 5652 ice_add_rxq_sysctls(struct ice_rx_queue *rxq) 5653 { 5654 struct ice_vsi *vsi = rxq->vsi; 5655 struct sysctl_ctx_list *ctx = &vsi->rxqs_ctx; 5656 struct sysctl_oid_list *rxqs_list, *this_rxq_list; 5657 struct sysctl_oid *rxq_node; 5658 char rxq_name[32], rxq_desc[32]; 5659 5660 const struct ice_sysctl_info ctls[] = { 5661 { &rxq->stats.rx_packets, "rx_packets", "Queue Packets Received" }, 5662 { &rxq->stats.rx_bytes, "rx_bytes", "Queue Bytes Received" }, 5663 { &rxq->stats.desc_errs, "rx_desc_errs", "Queue Rx Descriptor Errors" }, 5664 { 0, 0, 0 } 5665 }; 5666 5667 const struct ice_sysctl_info *entry = ctls; 5668 5669 rxqs_list = SYSCTL_CHILDREN(vsi->rxqs_node); 5670 5671 snprintf(rxq_name, sizeof(rxq_name), "%u", rxq->me); 5672 snprintf(rxq_desc, sizeof(rxq_desc), "Rx Queue %u", rxq->me); 5673 rxq_node = SYSCTL_ADD_NODE(ctx, rxqs_list, OID_AUTO, rxq_name, 5674 CTLFLAG_RD, NULL, rxq_desc); 5675 this_rxq_list = SYSCTL_CHILDREN(rxq_node); 5676 5677 /* Add the Rx queue statistics */ 5678 while (entry->stat != 0) { 5679 SYSCTL_ADD_U64(ctx, this_rxq_list, OID_AUTO, entry->name, 5680 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 5681 entry->description); 5682 entry++; 5683 } 5684 } 5685 5686 /** 5687 * ice_get_default_rss_key - Obtain a default RSS key 5688 * @seed: storage for the RSS key data 5689 * 5690 * Copies a pre-generated RSS key into the seed memory. The seed pointer must 5691 * point to a block of memory that is at least 40 bytes in size. 5692 * 5693 * The key isn't randomly generated each time this function is called because 5694 * that makes the RSS key change every time we reconfigure RSS. This does mean 5695 * that we're hard coding a possibly 'well known' key. We might want to 5696 * investigate randomly generating this key once during the first call. 
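 *
 * The default_seed array below provides exactly
 * ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE (40) bytes of key material,
 * which is the same amount ice_set_rss_key() below supplies via
 * keydata.standard_rss_key when the kernel RSS interface is compiled out.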
5697 */ 5698 static void 5699 ice_get_default_rss_key(u8 *seed) 5700 { 5701 const u8 default_seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE] = { 5702 0x39, 0xed, 0xff, 0x4d, 0x43, 0x58, 0x42, 0xc3, 0x5f, 0xb8, 5703 0xa5, 0x32, 0x95, 0x65, 0x81, 0xcd, 0x36, 0x79, 0x71, 0x97, 5704 0xde, 0xa4, 0x41, 0x40, 0x6f, 0x27, 0xe9, 0x81, 0x13, 0xa0, 5705 0x95, 0x93, 0x5b, 0x1e, 0x9d, 0x27, 0x9d, 0x24, 0x84, 0xb5, 5706 }; 5707 5708 bcopy(default_seed, seed, ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); 5709 } 5710 5711 /** 5712 * ice_set_rss_key - Configure a given VSI with the default RSS key 5713 * @vsi: the VSI to configure 5714 * 5715 * Program the hardware RSS key. We use rss_getkey to grab the kernel RSS key. 5716 * If the kernel RSS interface is not available, this will fall back to our 5717 * pre-generated hash seed from ice_get_default_rss_key(). 5718 */ 5719 static int 5720 ice_set_rss_key(struct ice_vsi *vsi) 5721 { 5722 struct ice_aqc_get_set_rss_keys keydata = { .standard_rss_key = {0} }; 5723 struct ice_softc *sc = vsi->sc; 5724 struct ice_hw *hw = &sc->hw; 5725 enum ice_status status; 5726 5727 /* 5728 * If the RSS kernel interface is disabled, this will return the 5729 * default RSS key above. 5730 */ 5731 rss_getkey(keydata.standard_rss_key); 5732 5733 status = ice_aq_set_rss_key(hw, vsi->idx, &keydata); 5734 if (status) { 5735 device_printf(sc->dev, 5736 "ice_aq_set_rss_key status %s, error %s\n", 5737 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5738 return (EIO); 5739 } 5740 5741 return (0); 5742 } 5743 5744 /** 5745 * ice_set_rss_flow_flds - Program the RSS hash flows after package init 5746 * @vsi: the VSI to configure 5747 * 5748 * If the package file is initialized, the default RSS flows are reset. We 5749 * need to reprogram the expected hash configuration. We'll use 5750 * rss_gethashconfig() to determine which flows to enable. If RSS kernel 5751 * support is not enabled, this macro will fall back to suitable defaults. 
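 *
 * The function below programs hash fields for IPv4, TCP/IPv4, UDP/IPv4,
 * IPv6 (including the IPV6_EX variant), TCP/IPv6 and UDP/IPv6 flows. Any
 * requested hash type outside ICE_DEFAULT_RSS_HASH_CONFIG is not programmed
 * and only produces the warning printed at the end.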
5752 */ 5753 static void 5754 ice_set_rss_flow_flds(struct ice_vsi *vsi) 5755 { 5756 struct ice_softc *sc = vsi->sc; 5757 struct ice_hw *hw = &sc->hw; 5758 struct ice_rss_hash_cfg rss_cfg = { 0, 0, ICE_RSS_ANY_HEADERS, false }; 5759 device_t dev = sc->dev; 5760 enum ice_status status; 5761 u_int rss_hash_config; 5762 5763 rss_hash_config = rss_gethashconfig(); 5764 5765 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) { 5766 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4; 5767 rss_cfg.hash_flds = ICE_FLOW_HASH_IPV4; 5768 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5769 if (status) 5770 device_printf(dev, 5771 "ice_add_rss_cfg on VSI %d failed for ipv4 flow, err %s aq_err %s\n", 5772 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5773 } 5774 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) { 5775 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP; 5776 rss_cfg.hash_flds = ICE_HASH_TCP_IPV4; 5777 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5778 if (status) 5779 device_printf(dev, 5780 "ice_add_rss_cfg on VSI %d failed for tcp4 flow, err %s aq_err %s\n", 5781 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5782 } 5783 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) { 5784 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP; 5785 rss_cfg.hash_flds = ICE_HASH_UDP_IPV4; 5786 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5787 if (status) 5788 device_printf(dev, 5789 "ice_add_rss_cfg on VSI %d failed for udp4 flow, err %s aq_err %s\n", 5790 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5791 } 5792 if (rss_hash_config & (RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_IPV6_EX)) { 5793 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6; 5794 rss_cfg.hash_flds = ICE_FLOW_HASH_IPV6; 5795 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5796 if (status) 5797 device_printf(dev, 5798 "ice_add_rss_cfg on VSI %d failed for ipv6 flow, err %s aq_err %s\n", 5799 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5800 } 5801 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) { 5802 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP; 5803 rss_cfg.hash_flds = ICE_HASH_TCP_IPV6; 5804 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5805 if (status) 5806 device_printf(dev, 5807 "ice_add_rss_cfg on VSI %d failed for tcp6 flow, err %s aq_err %s\n", 5808 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5809 } 5810 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) { 5811 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP; 5812 rss_cfg.hash_flds = ICE_HASH_UDP_IPV6; 5813 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5814 if (status) 5815 device_printf(dev, 5816 "ice_add_rss_cfg on VSI %d failed for udp6 flow, err %s aq_err %s\n", 5817 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5818 } 5819 5820 /* Warn about RSS hash types which are not supported */ 5821 /* coverity[dead_error_condition] */ 5822 if (rss_hash_config & ~ICE_DEFAULT_RSS_HASH_CONFIG) { 5823 device_printf(dev, 5824 "ice_add_rss_cfg on VSI %d could not configure every requested hash type\n", 5825 vsi->idx); 5826 } 5827 } 5828 5829 /** 5830 * ice_set_rss_lut - Program the RSS lookup table for a VSI 5831 * @vsi: the VSI to configure 5832 * 5833 * Programs the RSS lookup table for a given VSI. We use 5834 * rss_get_indirection_to_bucket which will use the indirection table provided 5835 * by the kernel RSS interface when available. 
If the kernel RSS interface is 5836 * not available, we will fall back to a simple round-robin fashion queue 5837 * assignment. 5838 */ 5839 static int 5840 ice_set_rss_lut(struct ice_vsi *vsi) 5841 { 5842 struct ice_softc *sc = vsi->sc; 5843 struct ice_hw *hw = &sc->hw; 5844 device_t dev = sc->dev; 5845 struct ice_aq_get_set_rss_lut_params lut_params; 5846 enum ice_status status; 5847 int i, err = 0; 5848 u8 *lut; 5849 5850 lut = (u8 *)malloc(vsi->rss_table_size, M_ICE, M_NOWAIT|M_ZERO); 5851 if (!lut) { 5852 device_printf(dev, "Failed to allocate RSS lut memory\n"); 5853 return (ENOMEM); 5854 } 5855 5856 /* Populate the LUT with max no. of queues. If the RSS kernel 5857 * interface is disabled, this will assign the lookup table in 5858 * a simple round robin fashion 5859 */ 5860 for (i = 0; i < vsi->rss_table_size; i++) { 5861 /* XXX: this needs to be changed if num_rx_queues ever counts 5862 * more than just the RSS queues */ 5863 lut[i] = rss_get_indirection_to_bucket(i) % vsi->num_rx_queues; 5864 } 5865 5866 lut_params.vsi_handle = vsi->idx; 5867 lut_params.lut_size = vsi->rss_table_size; 5868 lut_params.lut_type = vsi->rss_lut_type; 5869 lut_params.lut = lut; 5870 lut_params.global_lut_id = 0; 5871 status = ice_aq_set_rss_lut(hw, &lut_params); 5872 if (status) { 5873 device_printf(dev, 5874 "Cannot set RSS lut, err %s aq_err %s\n", 5875 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5876 err = (EIO); 5877 } 5878 5879 free(lut, M_ICE); 5880 return err; 5881 } 5882 5883 /** 5884 * ice_config_rss - Configure RSS for a VSI 5885 * @vsi: the VSI to configure 5886 * 5887 * If FEATURE_RSS is enabled, configures the RSS lookup table and hash key for 5888 * a given VSI. 5889 */ 5890 int 5891 ice_config_rss(struct ice_vsi *vsi) 5892 { 5893 int err; 5894 5895 /* Nothing to do, if RSS is not enabled */ 5896 if (!ice_is_bit_set(vsi->sc->feat_en, ICE_FEATURE_RSS)) 5897 return 0; 5898 5899 err = ice_set_rss_key(vsi); 5900 if (err) 5901 return err; 5902 5903 ice_set_rss_flow_flds(vsi); 5904 5905 return ice_set_rss_lut(vsi); 5906 } 5907 5908 /** 5909 * ice_log_pkg_init - Log a message about status of DDP initialization 5910 * @sc: the device softc pointer 5911 * @pkg_status: the status result of ice_copy_and_init_pkg 5912 * 5913 * Called by ice_load_pkg after an attempt to download the DDP package 5914 * contents to the device. Determines whether the download was successful or 5915 * not and logs an appropriate message for the system administrator. 5916 * 5917 * @post if a DDP package was previously downloaded on another port and it 5918 * is not compatible with this driver, pkg_status will be updated to reflect 5919 * this, and the driver will transition to safe mode. 5920 */ 5921 void 5922 ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status) 5923 { 5924 struct ice_hw *hw = &sc->hw; 5925 device_t dev = sc->dev; 5926 struct sbuf *active_pkg, *os_pkg; 5927 5928 active_pkg = sbuf_new_auto(); 5929 ice_active_pkg_version_str(hw, active_pkg); 5930 sbuf_finish(active_pkg); 5931 5932 os_pkg = sbuf_new_auto(); 5933 ice_os_pkg_version_str(hw, os_pkg); 5934 sbuf_finish(os_pkg); 5935 5936 switch (*pkg_status) { 5937 case ICE_SUCCESS: 5938 /* The package download AdminQ command returned success because 5939 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is 5940 * already a package loaded on the device. 
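 *
 * In either case, the code below compares the package version from the
 * ice_ddp file (hw->pkg_ver) against what is now active on the device
 * (hw->active_pkg_ver) to pick an appropriate log message.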
5941 */ 5942 if (hw->pkg_ver.major == hw->active_pkg_ver.major && 5943 hw->pkg_ver.minor == hw->active_pkg_ver.minor && 5944 hw->pkg_ver.update == hw->active_pkg_ver.update && 5945 hw->pkg_ver.draft == hw->active_pkg_ver.draft && 5946 !memcmp(hw->pkg_name, hw->active_pkg_name, 5947 sizeof(hw->pkg_name))) { 5948 switch (hw->pkg_dwnld_status) { 5949 case ICE_AQ_RC_OK: 5950 device_printf(dev, 5951 "The DDP package was successfully loaded: %s.\n", 5952 sbuf_data(active_pkg)); 5953 break; 5954 case ICE_AQ_RC_EEXIST: 5955 device_printf(dev, 5956 "DDP package already present on device: %s.\n", 5957 sbuf_data(active_pkg)); 5958 break; 5959 default: 5960 /* We do not expect this to occur, but the 5961 * extra messaging is here in case something 5962 * changes in the ice_init_pkg flow. 5963 */ 5964 device_printf(dev, 5965 "DDP package already present on device: %s. An unexpected error occurred, pkg_dwnld_status %s.\n", 5966 sbuf_data(active_pkg), 5967 ice_aq_str(hw->pkg_dwnld_status)); 5968 break; 5969 } 5970 } else if (pkg_ver_compatible(&hw->active_pkg_ver) == 0) { 5971 device_printf(dev, 5972 "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n", 5973 sbuf_data(active_pkg), 5974 sbuf_data(os_pkg)); 5975 } else if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { 5976 device_printf(dev, 5977 "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 5978 sbuf_data(active_pkg), 5979 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 5980 *pkg_status = ICE_ERR_NOT_SUPPORTED; 5981 } else { 5982 device_printf(dev, 5983 "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 5984 sbuf_data(active_pkg), 5985 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 5986 *pkg_status = ICE_ERR_NOT_SUPPORTED; 5987 } 5988 break; 5989 case ICE_ERR_NOT_SUPPORTED: 5990 /* 5991 * This assumes that the active_pkg_ver will not be 5992 * initialized if the ice_ddp package version is not 5993 * supported. 5994 */ 5995 if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { 5996 /* The ice_ddp version is not supported */ 5997 if (pkg_ver_compatible(&hw->pkg_ver) > 0) { 5998 device_printf(dev, 5999 "The DDP package in the ice_ddp module is higher than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated driver. Entering Safe Mode.\n", 6000 sbuf_data(os_pkg), 6001 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6002 } else if (pkg_ver_compatible(&hw->pkg_ver) < 0) { 6003 device_printf(dev, 6004 "The DDP package in the ice_ddp module is lower than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated ice_ddp module. Entering Safe Mode.\n", 6005 sbuf_data(os_pkg), 6006 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6007 } else { 6008 device_printf(dev, 6009 "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. 
Entering Safe Mode.\n", 6010 ice_status_str(*pkg_status), 6011 ice_aq_str(hw->pkg_dwnld_status), 6012 sbuf_data(os_pkg), 6013 sbuf_data(active_pkg), 6014 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6015 } 6016 } else { 6017 if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { 6018 device_printf(dev, 6019 "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6020 sbuf_data(active_pkg), 6021 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6022 } else if (pkg_ver_compatible(&hw->active_pkg_ver) < 0) { 6023 device_printf(dev, 6024 "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6025 sbuf_data(active_pkg), 6026 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6027 } else { 6028 device_printf(dev, 6029 "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6030 ice_status_str(*pkg_status), 6031 ice_aq_str(hw->pkg_dwnld_status), 6032 sbuf_data(os_pkg), 6033 sbuf_data(active_pkg), 6034 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6035 } 6036 } 6037 break; 6038 case ICE_ERR_CFG: 6039 case ICE_ERR_BUF_TOO_SHORT: 6040 case ICE_ERR_PARAM: 6041 device_printf(dev, 6042 "The DDP package in the ice_ddp module is invalid. Entering Safe Mode\n"); 6043 break; 6044 case ICE_ERR_FW_DDP_MISMATCH: 6045 device_printf(dev, 6046 "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); 6047 break; 6048 case ICE_ERR_AQ_ERROR: 6049 switch (hw->pkg_dwnld_status) { 6050 case ICE_AQ_RC_ENOSEC: 6051 case ICE_AQ_RC_EBADSIG: 6052 device_printf(dev, 6053 "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n"); 6054 goto free_sbufs; 6055 case ICE_AQ_RC_ESVN: 6056 device_printf(dev, 6057 "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n"); 6058 goto free_sbufs; 6059 case ICE_AQ_RC_EBADMAN: 6060 case ICE_AQ_RC_EBADBUF: 6061 device_printf(dev, 6062 "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n"); 6063 goto free_sbufs; 6064 default: 6065 break; 6066 } 6067 /* fall-through */ 6068 default: 6069 device_printf(dev, 6070 "An unknown error (%s aq_err %s) occurred when loading the DDP package. Entering Safe Mode.\n", 6071 ice_status_str(*pkg_status), 6072 ice_aq_str(hw->pkg_dwnld_status)); 6073 break; 6074 } 6075 6076 free_sbufs: 6077 sbuf_delete(active_pkg); 6078 sbuf_delete(os_pkg); 6079 } 6080 6081 /** 6082 * ice_load_pkg_file - Load the DDP package file using firmware_get 6083 * @sc: device private softc 6084 * 6085 * Use firmware_get to load the DDP package memory and then request that 6086 * firmware download the package contents and program the relevant hardware 6087 * bits. 6088 * 6089 * This function makes a copy of the DDP package memory which is tracked in 6090 * the ice_hw structure. The copy will be managed and released by 6091 * ice_deinit_hw(). This allows the firmware reference to be immediately 6092 * released using firmware_put. 
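 *
 * The package is looked up as the "ice_ddp" firmware image, normally
 * provided by the ice_ddp module; if it cannot be found, the driver falls
 * back to Safe Mode and (when booting) suggests setting ice_ddp_load="YES"
 * in loader.conf.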
6093 */ 6094 void 6095 ice_load_pkg_file(struct ice_softc *sc) 6096 { 6097 struct ice_hw *hw = &sc->hw; 6098 device_t dev = sc->dev; 6099 enum ice_status status; 6100 const struct firmware *pkg; 6101 6102 pkg = firmware_get("ice_ddp"); 6103 if (!pkg) { 6104 device_printf(dev, "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n"); 6105 if (cold) 6106 device_printf(dev, 6107 "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n"); 6108 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); 6109 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); 6110 return; 6111 } 6112 6113 /* Copy and download the pkg contents */ 6114 status = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize); 6115 6116 /* Release the firmware reference */ 6117 firmware_put(pkg, FIRMWARE_UNLOAD); 6118 6119 /* Check the active DDP package version and log a message */ 6120 ice_log_pkg_init(sc, &status); 6121 6122 /* Place the driver into safe mode */ 6123 if (status != ICE_SUCCESS) { 6124 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); 6125 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); 6126 } 6127 } 6128 6129 /** 6130 * ice_get_ifnet_counter - Retrieve counter value for a given ifnet counter 6131 * @vsi: the vsi to retrieve the value for 6132 * @counter: the counter type to retrieve 6133 * 6134 * Returns the value for a given ifnet counter. To do so, we calculate the 6135 * value based on the matching hardware statistics. 6136 */ 6137 uint64_t 6138 ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter) 6139 { 6140 struct ice_hw_port_stats *hs = &vsi->sc->stats.cur; 6141 struct ice_eth_stats *es = &vsi->hw_stats.cur; 6142 6143 /* For some statistics, especially those related to error flows, we do 6144 * not have per-VSI counters. In this case, we just report the global 6145 * counters. 6146 */ 6147 6148 switch (counter) { 6149 case IFCOUNTER_IPACKETS: 6150 return (es->rx_unicast + es->rx_multicast + es->rx_broadcast); 6151 case IFCOUNTER_IERRORS: 6152 return (hs->crc_errors + hs->illegal_bytes + 6153 hs->mac_local_faults + hs->mac_remote_faults + 6154 hs->rx_len_errors + hs->rx_undersize + 6155 hs->rx_oversize + hs->rx_fragments + hs->rx_jabber); 6156 case IFCOUNTER_OPACKETS: 6157 return (es->tx_unicast + es->tx_multicast + es->tx_broadcast); 6158 case IFCOUNTER_OERRORS: 6159 return (es->tx_errors); 6160 case IFCOUNTER_COLLISIONS: 6161 return (0); 6162 case IFCOUNTER_IBYTES: 6163 return (es->rx_bytes); 6164 case IFCOUNTER_OBYTES: 6165 return (es->tx_bytes); 6166 case IFCOUNTER_IMCASTS: 6167 return (es->rx_multicast); 6168 case IFCOUNTER_OMCASTS: 6169 return (es->tx_multicast); 6170 case IFCOUNTER_IQDROPS: 6171 return (es->rx_discards); 6172 case IFCOUNTER_OQDROPS: 6173 return (hs->tx_dropped_link_down); 6174 case IFCOUNTER_NOPROTO: 6175 return (es->rx_unknown_protocol); 6176 default: 6177 return if_get_counter_default(vsi->sc->ifp, counter); 6178 } 6179 } 6180 6181 /** 6182 * ice_save_pci_info - Save PCI configuration fields in HW struct 6183 * @hw: the ice_hw struct to save the PCI information in 6184 * @dev: the device to get the PCI information from 6185 * 6186 * This should only be called once, early in the device attach 6187 * process. 
6188 */ 6189 void 6190 ice_save_pci_info(struct ice_hw *hw, device_t dev) 6191 { 6192 hw->vendor_id = pci_get_vendor(dev); 6193 hw->device_id = pci_get_device(dev); 6194 hw->subsystem_vendor_id = pci_get_subvendor(dev); 6195 hw->subsystem_device_id = pci_get_subdevice(dev); 6196 hw->revision_id = pci_get_revid(dev); 6197 hw->bus.device = pci_get_slot(dev); 6198 hw->bus.func = pci_get_function(dev); 6199 } 6200 6201 /** 6202 * ice_replay_all_vsi_cfg - Replace configuration for all VSIs after reset 6203 * @sc: the device softc 6204 * 6205 * Replace the configuration for each VSI, and then cleanup replay 6206 * information. Called after a hardware reset in order to reconfigure the 6207 * active VSIs. 6208 */ 6209 int 6210 ice_replay_all_vsi_cfg(struct ice_softc *sc) 6211 { 6212 struct ice_hw *hw = &sc->hw; 6213 enum ice_status status; 6214 int i; 6215 6216 for (i = 0 ; i < sc->num_available_vsi; i++) { 6217 struct ice_vsi *vsi = sc->all_vsi[i]; 6218 6219 if (!vsi) 6220 continue; 6221 6222 status = ice_replay_vsi(hw, vsi->idx); 6223 if (status) { 6224 device_printf(sc->dev, "Failed to replay VSI %d, err %s aq_err %s\n", 6225 vsi->idx, ice_status_str(status), 6226 ice_aq_str(hw->adminq.sq_last_status)); 6227 return (EIO); 6228 } 6229 } 6230 6231 /* Cleanup replay filters after successful reconfiguration */ 6232 ice_replay_post(hw); 6233 return (0); 6234 } 6235 6236 /** 6237 * ice_clean_vsi_rss_cfg - Cleanup RSS configuration for a given VSI 6238 * @vsi: pointer to the VSI structure 6239 * 6240 * Cleanup the advanced RSS configuration for a given VSI. This is necessary 6241 * during driver removal to ensure that all RSS resources are properly 6242 * released. 6243 * 6244 * @remark this function doesn't report an error as it is expected to be 6245 * called during driver reset and unload, and there isn't much the driver can 6246 * do if freeing RSS resources fails. 6247 */ 6248 static void 6249 ice_clean_vsi_rss_cfg(struct ice_vsi *vsi) 6250 { 6251 struct ice_softc *sc = vsi->sc; 6252 struct ice_hw *hw = &sc->hw; 6253 device_t dev = sc->dev; 6254 enum ice_status status; 6255 6256 status = ice_rem_vsi_rss_cfg(hw, vsi->idx); 6257 if (status) 6258 device_printf(dev, 6259 "Failed to remove RSS configuration for VSI %d, err %s\n", 6260 vsi->idx, ice_status_str(status)); 6261 6262 /* Remove this VSI from the RSS list */ 6263 ice_rem_vsi_rss_list(hw, vsi->idx); 6264 } 6265 6266 /** 6267 * ice_clean_all_vsi_rss_cfg - Cleanup RSS configuration for all VSIs 6268 * @sc: the device softc pointer 6269 * 6270 * Cleanup the advanced RSS configuration for all VSIs on a given PF 6271 * interface. 6272 * 6273 * @remark This should be called while preparing for a reset, to cleanup stale 6274 * RSS configuration for all VSIs. 6275 */ 6276 void 6277 ice_clean_all_vsi_rss_cfg(struct ice_softc *sc) 6278 { 6279 int i; 6280 6281 /* No need to cleanup if RSS is not enabled */ 6282 if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) 6283 return; 6284 6285 for (i = 0; i < sc->num_available_vsi; i++) { 6286 struct ice_vsi *vsi = sc->all_vsi[i]; 6287 6288 if (vsi) 6289 ice_clean_vsi_rss_cfg(vsi); 6290 } 6291 } 6292 6293 /** 6294 * ice_requested_fec_mode - Return the requested FEC mode as a string 6295 * @pi: The port info structure 6296 * 6297 * Return a string representing the requested FEC mode. 
6298 */ 6299 static const char * 6300 ice_requested_fec_mode(struct ice_port_info *pi) 6301 { 6302 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 6303 enum ice_status status; 6304 6305 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 6306 &pcaps, NULL); 6307 if (status) 6308 /* Just report unknown if we can't get capabilities */ 6309 return "Unknown"; 6310 6311 /* Check if RS-FEC has been requested first */ 6312 if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 6313 ICE_AQC_PHY_FEC_25G_RS_544_REQ)) 6314 return ice_fec_str(ICE_FEC_RS); 6315 6316 /* If RS FEC has not been requested, then check BASE-R */ 6317 if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 6318 ICE_AQC_PHY_FEC_25G_KR_REQ)) 6319 return ice_fec_str(ICE_FEC_BASER); 6320 6321 return ice_fec_str(ICE_FEC_NONE); 6322 } 6323 6324 /** 6325 * ice_negotiated_fec_mode - Return the negotiated FEC mode as a string 6326 * @pi: The port info structure 6327 * 6328 * Return a string representing the current FEC mode. 6329 */ 6330 static const char * 6331 ice_negotiated_fec_mode(struct ice_port_info *pi) 6332 { 6333 /* First, check if RS has been requested first */ 6334 if (pi->phy.link_info.fec_info & (ICE_AQ_LINK_25G_RS_528_FEC_EN | 6335 ICE_AQ_LINK_25G_RS_544_FEC_EN)) 6336 return ice_fec_str(ICE_FEC_RS); 6337 6338 /* If RS FEC has not been requested, then check BASE-R */ 6339 if (pi->phy.link_info.fec_info & ICE_AQ_LINK_25G_KR_FEC_EN) 6340 return ice_fec_str(ICE_FEC_BASER); 6341 6342 return ice_fec_str(ICE_FEC_NONE); 6343 } 6344 6345 /** 6346 * ice_autoneg_mode - Return string indicating of autoneg completed 6347 * @pi: The port info structure 6348 * 6349 * Return "True" if autonegotiation is completed, "False" otherwise. 6350 */ 6351 static const char * 6352 ice_autoneg_mode(struct ice_port_info *pi) 6353 { 6354 if (pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) 6355 return "True"; 6356 else 6357 return "False"; 6358 } 6359 6360 /** 6361 * ice_flowcontrol_mode - Return string indicating the Flow Control mode 6362 * @pi: The port info structure 6363 * 6364 * Returns the current Flow Control mode as a string. 6365 */ 6366 static const char * 6367 ice_flowcontrol_mode(struct ice_port_info *pi) 6368 { 6369 return ice_fc_str(pi->fc.current_mode); 6370 } 6371 6372 /** 6373 * ice_link_up_msg - Log a link up message with associated info 6374 * @sc: the device private softc 6375 * 6376 * Log a link up message with LOG_NOTICE message level. Include information 6377 * about the duplex, FEC mode, autonegotiation and flow control. 6378 */ 6379 void 6380 ice_link_up_msg(struct ice_softc *sc) 6381 { 6382 struct ice_hw *hw = &sc->hw; 6383 struct ifnet *ifp = sc->ifp; 6384 const char *speed, *req_fec, *neg_fec, *autoneg, *flowcontrol; 6385 6386 speed = ice_aq_speed_to_str(hw->port_info); 6387 req_fec = ice_requested_fec_mode(hw->port_info); 6388 neg_fec = ice_negotiated_fec_mode(hw->port_info); 6389 autoneg = ice_autoneg_mode(hw->port_info); 6390 flowcontrol = ice_flowcontrol_mode(hw->port_info); 6391 6392 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 6393 ifp->if_xname, speed, req_fec, neg_fec, autoneg, flowcontrol); 6394 } 6395 6396 /** 6397 * ice_update_laa_mac - Update MAC address if Locally Administered 6398 * @sc: the device softc 6399 * 6400 * Update the device MAC address when a Locally Administered Address is 6401 * assigned. 6402 * 6403 * This function does *not* update the MAC filter list itself. 
Instead, it 6404 * should be called after ice_rm_pf_default_mac_filters, so that the previous 6405 * address filter will be removed, and before ice_cfg_pf_default_mac_filters, 6406 * so that the new address filter will be assigned. 6407 */ 6408 int 6409 ice_update_laa_mac(struct ice_softc *sc) 6410 { 6411 const u8 *lladdr = (const u8 *)IF_LLADDR(sc->ifp); 6412 struct ice_hw *hw = &sc->hw; 6413 enum ice_status status; 6414 6415 /* If the address is the same, then there is nothing to update */ 6416 if (!memcmp(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN)) 6417 return (0); 6418 6419 /* Reject Multicast addresses */ 6420 if (ETHER_IS_MULTICAST(lladdr)) 6421 return (EINVAL); 6422 6423 status = ice_aq_manage_mac_write(hw, lladdr, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL); 6424 if (status) { 6425 device_printf(sc->dev, "Failed to write mac %6D to firmware, err %s aq_err %s\n", 6426 lladdr, ":", ice_status_str(status), 6427 ice_aq_str(hw->adminq.sq_last_status)); 6428 return (EFAULT); 6429 } 6430 6431 /* Copy the address into place of the LAN address. */ 6432 bcopy(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN); 6433 6434 return (0); 6435 } 6436 6437 /** 6438 * ice_get_and_print_bus_info - Save (PCI) bus info and print messages 6439 * @sc: device softc 6440 * 6441 * This will potentially print out a warning message if bus bandwidth 6442 * is insufficient for full-speed operation. 6443 * 6444 * This should only be called once, during the attach process, after 6445 * hw->port_info has been filled out with port link topology information 6446 * (from the Get PHY Capabilities Admin Queue command). 6447 */ 6448 void 6449 ice_get_and_print_bus_info(struct ice_softc *sc) 6450 { 6451 struct ice_hw *hw = &sc->hw; 6452 device_t dev = sc->dev; 6453 u16 pci_link_status; 6454 int offset; 6455 6456 pci_find_cap(dev, PCIY_EXPRESS, &offset); 6457 pci_link_status = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 6458 6459 /* Fill out hw struct with PCIE link status info */ 6460 ice_set_pci_link_status_data(hw, pci_link_status); 6461 6462 /* Use info to print out bandwidth messages */ 6463 ice_print_bus_link_data(dev, hw); 6464 6465 if (ice_pcie_bandwidth_check(sc)) { 6466 device_printf(dev, 6467 "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 6468 device_printf(dev, 6469 "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 6470 } 6471 } 6472 6473 /** 6474 * ice_pcie_bus_speed_to_rate - Convert driver bus speed enum value to 6475 * a 64-bit baudrate. 6476 * @speed: enum value to convert 6477 * 6478 * This only goes up to PCIE Gen 4. 6479 */ 6480 static uint64_t 6481 ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed) 6482 { 6483 /* If the PCI-E speed is Gen1 or Gen2, then report 6484 * only 80% of bus speed to account for encoding overhead. 6485 */ 6486 switch (speed) { 6487 case ice_pcie_speed_2_5GT: 6488 return IF_Gbps(2); 6489 case ice_pcie_speed_5_0GT: 6490 return IF_Gbps(4); 6491 case ice_pcie_speed_8_0GT: 6492 return IF_Gbps(8); 6493 case ice_pcie_speed_16_0GT: 6494 return IF_Gbps(16); 6495 case ice_pcie_speed_unknown: 6496 default: 6497 return 0; 6498 } 6499 } 6500 6501 /** 6502 * ice_pcie_lnk_width_to_int - Convert driver pci-e width enum value to 6503 * a 32-bit number. 
6504 * @width: enum value to convert 6505 */ 6506 static int 6507 ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width) 6508 { 6509 switch (width) { 6510 case ice_pcie_lnk_x1: 6511 return (1); 6512 case ice_pcie_lnk_x2: 6513 return (2); 6514 case ice_pcie_lnk_x4: 6515 return (4); 6516 case ice_pcie_lnk_x8: 6517 return (8); 6518 case ice_pcie_lnk_x12: 6519 return (12); 6520 case ice_pcie_lnk_x16: 6521 return (16); 6522 case ice_pcie_lnk_x32: 6523 return (32); 6524 case ice_pcie_lnk_width_resrv: 6525 case ice_pcie_lnk_width_unknown: 6526 default: 6527 return (0); 6528 } 6529 } 6530 6531 /** 6532 * ice_pcie_bandwidth_check - Check if PCI-E bandwidth is sufficient for 6533 * full-speed device operation. 6534 * @sc: adapter softc 6535 * 6536 * Returns 0 if sufficient; 1 if not. 6537 */ 6538 static uint8_t 6539 ice_pcie_bandwidth_check(struct ice_softc *sc) 6540 { 6541 struct ice_hw *hw = &sc->hw; 6542 int num_ports, pcie_width; 6543 u64 pcie_speed, port_speed; 6544 6545 MPASS(hw->port_info); 6546 6547 num_ports = bitcount32(hw->func_caps.common_cap.valid_functions); 6548 port_speed = ice_phy_types_to_max_rate(hw->port_info); 6549 pcie_speed = ice_pcie_bus_speed_to_rate(hw->bus.speed); 6550 pcie_width = ice_pcie_lnk_width_to_int(hw->bus.width); 6551 6552 /* 6553 * If 2x100, clamp ports to 1 -- 2nd port is intended for 6554 * failover. 6555 */ 6556 if (port_speed == IF_Gbps(100)) 6557 num_ports = 1; 6558 6559 return !!((num_ports * port_speed) > pcie_speed * pcie_width); 6560 } 6561 6562 /** 6563 * ice_print_bus_link_data - Print PCI-E bandwidth information 6564 * @dev: device to print string for 6565 * @hw: hw struct with PCI-e link information 6566 */ 6567 static void 6568 ice_print_bus_link_data(device_t dev, struct ice_hw *hw) 6569 { 6570 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 6571 ((hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" : 6572 (hw->bus.speed == ice_pcie_speed_8_0GT) ? "8.0GT/s" : 6573 (hw->bus.speed == ice_pcie_speed_5_0GT) ? "5.0GT/s" : 6574 (hw->bus.speed == ice_pcie_speed_2_5GT) ? "2.5GT/s" : "Unknown"), 6575 (hw->bus.width == ice_pcie_lnk_x32) ? "Width x32" : 6576 (hw->bus.width == ice_pcie_lnk_x16) ? "Width x16" : 6577 (hw->bus.width == ice_pcie_lnk_x12) ? "Width x12" : 6578 (hw->bus.width == ice_pcie_lnk_x8) ? "Width x8" : 6579 (hw->bus.width == ice_pcie_lnk_x4) ? "Width x4" : 6580 (hw->bus.width == ice_pcie_lnk_x2) ? "Width x2" : 6581 (hw->bus.width == ice_pcie_lnk_x1) ? 
"Width x1" : "Width Unknown"); 6582 } 6583 6584 /** 6585 * ice_set_pci_link_status_data - store PCI bus info 6586 * @hw: pointer to hardware structure 6587 * @link_status: the link status word from PCI config space 6588 * 6589 * Stores the PCI bus info (speed, width, type) within the ice_hw structure 6590 **/ 6591 static void 6592 ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status) 6593 { 6594 u16 reg; 6595 6596 hw->bus.type = ice_bus_pci_express; 6597 6598 reg = (link_status & PCIEM_LINK_STA_WIDTH) >> 4; 6599 6600 switch (reg) { 6601 case ice_pcie_lnk_x1: 6602 case ice_pcie_lnk_x2: 6603 case ice_pcie_lnk_x4: 6604 case ice_pcie_lnk_x8: 6605 case ice_pcie_lnk_x12: 6606 case ice_pcie_lnk_x16: 6607 case ice_pcie_lnk_x32: 6608 hw->bus.width = (enum ice_pcie_link_width)reg; 6609 break; 6610 default: 6611 hw->bus.width = ice_pcie_lnk_width_unknown; 6612 break; 6613 } 6614 6615 reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x13; 6616 6617 switch (reg) { 6618 case ice_pcie_speed_2_5GT: 6619 case ice_pcie_speed_5_0GT: 6620 case ice_pcie_speed_8_0GT: 6621 case ice_pcie_speed_16_0GT: 6622 hw->bus.speed = (enum ice_pcie_bus_speed)reg; 6623 break; 6624 default: 6625 hw->bus.speed = ice_pcie_speed_unknown; 6626 break; 6627 } 6628 } 6629 6630 /** 6631 * ice_init_link_events - Initialize Link Status Events mask 6632 * @sc: the device softc 6633 * 6634 * Initialize the Link Status Events mask to disable notification of link 6635 * events we don't care about in software. Also request that link status 6636 * events be enabled. 6637 */ 6638 int 6639 ice_init_link_events(struct ice_softc *sc) 6640 { 6641 struct ice_hw *hw = &sc->hw; 6642 enum ice_status status; 6643 u16 wanted_events; 6644 6645 /* Set the bits for the events that we want to be notified by */ 6646 wanted_events = (ICE_AQ_LINK_EVENT_UPDOWN | 6647 ICE_AQ_LINK_EVENT_MEDIA_NA | 6648 ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL); 6649 6650 /* request that every event except the wanted events be masked */ 6651 status = ice_aq_set_event_mask(hw, hw->port_info->lport, ~wanted_events, NULL); 6652 if (status) { 6653 device_printf(sc->dev, 6654 "Failed to set link status event mask, err %s aq_err %s\n", 6655 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6656 return (EIO); 6657 } 6658 6659 /* Request link info with the LSE bit set to enable link status events */ 6660 status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL); 6661 if (status) { 6662 device_printf(sc->dev, 6663 "Failed to enable link status events, err %s aq_err %s\n", 6664 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6665 return (EIO); 6666 } 6667 6668 return (0); 6669 } 6670 6671 /** 6672 * ice_handle_mdd_event - Handle possibly malicious events 6673 * @sc: the device softc 6674 * 6675 * Called by the admin task if an MDD detection interrupt is triggered. 6676 * Identifies possibly malicious events coming from VFs. Also triggers for 6677 * similar incorrect behavior from the PF as well. 
6678 */ 6679 void 6680 ice_handle_mdd_event(struct ice_softc *sc) 6681 { 6682 struct ice_hw *hw = &sc->hw; 6683 bool mdd_detected = false, request_reinit = false; 6684 device_t dev = sc->dev; 6685 u32 reg; 6686 6687 if (!ice_testandclear_state(&sc->state, ICE_STATE_MDD_PENDING)) 6688 return; 6689 6690 reg = rd32(hw, GL_MDET_TX_TCLAN); 6691 if (reg & GL_MDET_TX_TCLAN_VALID_M) { 6692 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S; 6693 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> GL_MDET_TX_TCLAN_VF_NUM_S; 6694 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> GL_MDET_TX_TCLAN_MAL_TYPE_S; 6695 u16 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S; 6696 6697 device_printf(dev, "Malicious Driver Detection Tx Descriptor check event '%s' on Tx queue %u PF# %u VF# %u\n", 6698 ice_mdd_tx_tclan_str(event), queue, pf_num, vf_num); 6699 6700 /* Only clear this event if it matches this PF, that way other 6701 * PFs can read the event and determine VF and queue number. 6702 */ 6703 if (pf_num == hw->pf_id) 6704 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); 6705 6706 mdd_detected = true; 6707 } 6708 6709 /* Determine what triggered the MDD event */ 6710 reg = rd32(hw, GL_MDET_TX_PQM); 6711 if (reg & GL_MDET_TX_PQM_VALID_M) { 6712 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> GL_MDET_TX_PQM_PF_NUM_S; 6713 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> GL_MDET_TX_PQM_VF_NUM_S; 6714 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> GL_MDET_TX_PQM_MAL_TYPE_S; 6715 u16 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> GL_MDET_TX_PQM_QNUM_S; 6716 6717 device_printf(dev, "Malicious Driver Detection Tx Quanta check event '%s' on Tx queue %u PF# %u VF# %u\n", 6718 ice_mdd_tx_pqm_str(event), queue, pf_num, vf_num); 6719 6720 /* Only clear this event if it matches this PF, that way other 6721 * PFs can read the event and determine VF and queue number. 6722 */ 6723 if (pf_num == hw->pf_id) 6724 wr32(hw, GL_MDET_TX_PQM, 0xffffffff); 6725 6726 mdd_detected = true; 6727 } 6728 6729 reg = rd32(hw, GL_MDET_RX); 6730 if (reg & GL_MDET_RX_VALID_M) { 6731 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> GL_MDET_RX_PF_NUM_S; 6732 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> GL_MDET_RX_VF_NUM_S; 6733 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> GL_MDET_RX_MAL_TYPE_S; 6734 u16 queue = (reg & GL_MDET_RX_QNUM_M) >> GL_MDET_RX_QNUM_S; 6735 6736 device_printf(dev, "Malicious Driver Detection Rx event '%s' on Rx queue %u PF# %u VF# %u\n", 6737 ice_mdd_rx_str(event), queue, pf_num, vf_num); 6738 6739 /* Only clear this event if it matches this PF, that way other 6740 * PFs can read the event and determine VF and queue number. 6741 */ 6742 if (pf_num == hw->pf_id) 6743 wr32(hw, GL_MDET_RX, 0xffffffff); 6744 6745 mdd_detected = true; 6746 } 6747 6748 /* Now, confirm that this event actually affects this PF, by checking 6749 * the PF registers. 
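 * The checks below mirror the global GL_MDET_* handling above: when a PF
 * register's valid bit is set, the register is cleared, the matching Tx/Rx
 * MDD soft counter is incremented, and a reinitialization of the Tx/Rx
 * queues is requested from the stack.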
6750 */
6751 if (mdd_detected) {
6752 reg = rd32(hw, PF_MDET_TX_TCLAN);
6753 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
6754 wr32(hw, PF_MDET_TX_TCLAN, 0xffff);
6755 sc->soft_stats.tx_mdd_count++;
6756 request_reinit = true;
6757 }
6758
6759 reg = rd32(hw, PF_MDET_TX_PQM);
6760 if (reg & PF_MDET_TX_PQM_VALID_M) {
6761 wr32(hw, PF_MDET_TX_PQM, 0xffff);
6762 sc->soft_stats.tx_mdd_count++;
6763 request_reinit = true;
6764 }
6765
6766 reg = rd32(hw, PF_MDET_RX);
6767 if (reg & PF_MDET_RX_VALID_M) {
6768 wr32(hw, PF_MDET_RX, 0xffff);
6769 sc->soft_stats.rx_mdd_count++;
6770 request_reinit = true;
6771 }
6772 }
6773
6774 /* TODO: Implement logic to detect and handle events caused by VFs. */
6775
6776 /* request that the upper stack re-initialize the Tx/Rx queues */
6777 if (request_reinit)
6778 ice_request_stack_reinit(sc);
6779
6780 ice_flush(hw);
6781 }
6782
6783 /**
6784 * ice_init_dcb_setup - Initialize DCB settings for HW
6785 * @sc: the device softc
6786 *
6787 * This needs to be called after the fw_lldp_agent sysctl is added, since that
6788 * can update the device's LLDP agent status if a tunable value is set.
6789 *
6790 * Get and store the initial state of DCB settings on driver load. Print out
6791 * informational messages as well.
6792 */
6793 void
6794 ice_init_dcb_setup(struct ice_softc *sc)
6795 {
6796 struct ice_hw *hw = &sc->hw;
6797 device_t dev = sc->dev;
6798 bool dcbx_agent_status;
6799 enum ice_status status;
6800
6801 /* Don't do anything if DCB isn't supported */
6802 if (!hw->func_caps.common_cap.dcb) {
6803 device_printf(dev, "%s: No DCB support\n",
6804 __func__);
6805 return;
6806 }
6807
6808 hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw);
6809 if (hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_DONE &&
6810 hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
6811 /*
6812 * Start DCBX agent, but not LLDP. The return value isn't
6813 * checked here because a more detailed dcbx agent status is
6814 * retrieved and checked in ice_init_dcb() and below.
6815 */
6816 ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL);
6817 }
6818
6819 /* This sets hw->port_info->qos_cfg.is_sw_lldp */
6820 status = ice_init_dcb(hw, true);
6821
6822 /* If there is an error, then FW LLDP is not in a usable state */
6823 if (status != 0 && status != ICE_ERR_NOT_READY) {
6824 /* Don't print an error message if the return code from the AQ
6825 * cmd performed in ice_init_dcb() is EPERM; that means the
6826 * FW LLDP engine is disabled, and that is a valid state.
6827 */
6828 if (!(status == ICE_ERR_AQ_ERROR &&
6829 hw->adminq.sq_last_status == ICE_AQ_RC_EPERM)) {
6830 device_printf(dev, "DCB init failed, err %s aq_err %s\n",
6831 ice_status_str(status),
6832 ice_aq_str(hw->adminq.sq_last_status));
6833 }
6834 hw->port_info->qos_cfg.dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
6835 }
6836
6837 switch (hw->port_info->qos_cfg.dcbx_status) {
6838 case ICE_DCBX_STATUS_DIS:
6839 ice_debug(hw, ICE_DBG_DCB, "DCBX disabled\n");
6840 break;
6841 case ICE_DCBX_STATUS_NOT_STARTED:
6842 ice_debug(hw, ICE_DBG_DCB, "DCBX not started\n");
6843 break;
6844 case ICE_DCBX_STATUS_MULTIPLE_PEERS:
6845 ice_debug(hw, ICE_DBG_DCB, "DCBX detected multiple peers\n");
6846 break;
6847 default:
6848 break;
6849 }
6850
6851 /* LLDP disabled in FW */
6852 if (hw->port_info->qos_cfg.is_sw_lldp) {
6853 ice_add_rx_lldp_filter(sc);
6854 device_printf(dev, "Firmware LLDP agent disabled\n");
6855 }
6856 }
6857
6858 /**
6859 * ice_handle_mib_change_event - helper function to log LLDP MIB change events
6860 * @sc: device softc
6861 * @event: event received on a control queue
6862 *
6863 * Prints out the type of an LLDP MIB change event in a DCB debug message.
6864 *
6865 * XXX: Should be extended to do more if the driver decides to notify other SW
6866 * of LLDP MIB changes, or needs to extract info from the MIB.
6867 */
6868 static void
6869 ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event)
6870 {
6871 struct ice_aqc_lldp_get_mib *params =
6872 (struct ice_aqc_lldp_get_mib *)&event->desc.params.lldp_get_mib;
6873 u8 mib_type, bridge_type, tx_status;
6874
6875 /* XXX: To get the contents of the MIB that caused the event, set the
6876 * ICE_DBG_AQ debug mask and read that output
6877 */
6878 static const char* mib_type_strings[] = {
6879 "Local MIB",
6880 "Remote MIB",
6881 "Reserved",
6882 "Reserved"
6883 };
6884 static const char* bridge_type_strings[] = {
6885 "Nearest Bridge",
6886 "Non-TPMR Bridge",
6887 "Reserved",
6888 "Reserved"
6889 };
6890 static const char* tx_status_strings[] = {
6891 "Port's TX active",
6892 "Port's TX suspended and drained",
6893 "Reserved",
6894 "Port's TX suspended and drained; blocked TC pipe flushed"
6895 };
6896
6897 mib_type = (params->type & ICE_AQ_LLDP_MIB_TYPE_M) >>
6898 ICE_AQ_LLDP_MIB_TYPE_S;
6899 bridge_type = (params->type & ICE_AQ_LLDP_BRID_TYPE_M) >>
6900 ICE_AQ_LLDP_BRID_TYPE_S;
6901 tx_status = (params->type & ICE_AQ_LLDP_TX_M) >>
6902 ICE_AQ_LLDP_TX_S;
6903
6904 ice_debug(&sc->hw, ICE_DBG_DCB, "LLDP MIB Change Event (%s, %s, %s)\n",
6905 mib_type_strings[mib_type], bridge_type_strings[bridge_type],
6906 tx_status_strings[tx_status]);
6907 }
6908
6909 /**
6910 * ice_send_version - Send driver version to firmware
6911 * @sc: the device private softc
6912 *
6913 * Send the driver version to the firmware. This must be called as early as
6914 * possible after ice_init_hw().
6915 */ 6916 int 6917 ice_send_version(struct ice_softc *sc) 6918 { 6919 struct ice_driver_ver driver_version = {0}; 6920 struct ice_hw *hw = &sc->hw; 6921 device_t dev = sc->dev; 6922 enum ice_status status; 6923 6924 driver_version.major_ver = ice_major_version; 6925 driver_version.minor_ver = ice_minor_version; 6926 driver_version.build_ver = ice_patch_version; 6927 driver_version.subbuild_ver = ice_rc_version; 6928 6929 strlcpy((char *)driver_version.driver_string, ice_driver_version, 6930 sizeof(driver_version.driver_string)); 6931 6932 status = ice_aq_send_driver_ver(hw, &driver_version, NULL); 6933 if (status) { 6934 device_printf(dev, "Unable to send driver version to firmware, err %s aq_err %s\n", 6935 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6936 return (EIO); 6937 } 6938 6939 return (0); 6940 } 6941 6942 /** 6943 * ice_handle_lan_overflow_event - helper function to log LAN overflow events 6944 * @sc: device softc 6945 * @event: event received on a control queue 6946 * 6947 * Prints out a message when a LAN overflow event is detected on a receive 6948 * queue. 6949 */ 6950 static void 6951 ice_handle_lan_overflow_event(struct ice_softc *sc, struct ice_rq_event_info *event) 6952 { 6953 struct ice_aqc_event_lan_overflow *params = 6954 (struct ice_aqc_event_lan_overflow *)&event->desc.params.lan_overflow; 6955 struct ice_hw *hw = &sc->hw; 6956 6957 ice_debug(hw, ICE_DBG_DCB, "LAN overflow event detected, prtdcb_ruptq=0x%08x, qtx_ctl=0x%08x\n", 6958 LE32_TO_CPU(params->prtdcb_ruptq), 6959 LE32_TO_CPU(params->qtx_ctl)); 6960 } 6961 6962 /** 6963 * ice_add_ethertype_to_list - Add an Ethertype filter to a filter list 6964 * @vsi: the VSI to target packets to 6965 * @list: the list to add the filter to 6966 * @ethertype: the Ethertype to filter on 6967 * @direction: The direction of the filter (Tx or Rx) 6968 * @action: the action to take 6969 * 6970 * Add an Ethertype filter to a filter list. Used to forward a series of 6971 * filters to the firmware for configuring the switch. 6972 * 6973 * Returns 0 on success, and an error code on failure. 6974 */ 6975 static int 6976 ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 6977 u16 ethertype, u16 direction, 6978 enum ice_sw_fwd_act_type action) 6979 { 6980 struct ice_fltr_list_entry *entry; 6981 6982 MPASS((direction == ICE_FLTR_TX) || (direction == ICE_FLTR_RX)); 6983 6984 entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); 6985 if (!entry) 6986 return (ENOMEM); 6987 6988 entry->fltr_info.flag = direction; 6989 entry->fltr_info.src_id = ICE_SRC_ID_VSI; 6990 entry->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 6991 entry->fltr_info.fltr_act = action; 6992 entry->fltr_info.vsi_handle = vsi->idx; 6993 entry->fltr_info.l_data.ethertype_mac.ethertype = ethertype; 6994 6995 LIST_ADD(&entry->list_entry, list); 6996 6997 return 0; 6998 } 6999 7000 #define ETHERTYPE_PAUSE_FRAMES 0x8808 7001 #define ETHERTYPE_LLDP_FRAMES 0x88cc 7002 7003 /** 7004 * ice_cfg_pf_ethertype_filters - Configure switch to drop ethertypes 7005 * @sc: the device private softc 7006 * 7007 * Configure the switch to drop PAUSE frames and LLDP frames transmitted from 7008 * the host. This prevents malicious VFs from sending these frames and being 7009 * able to control or configure the network. 
7010 */ 7011 int 7012 ice_cfg_pf_ethertype_filters(struct ice_softc *sc) 7013 { 7014 struct ice_list_head ethertype_list; 7015 struct ice_vsi *vsi = &sc->pf_vsi; 7016 struct ice_hw *hw = &sc->hw; 7017 device_t dev = sc->dev; 7018 enum ice_status status; 7019 int err = 0; 7020 7021 INIT_LIST_HEAD(ðertype_list); 7022 7023 /* 7024 * Note that the switch filters will ignore the VSI index for the drop 7025 * action, so we only need to program drop filters once for the main 7026 * VSI. 7027 */ 7028 7029 /* Configure switch to drop all Tx pause frames coming from any VSI. */ 7030 if (sc->enable_tx_fc_filter) { 7031 err = ice_add_ethertype_to_list(vsi, ðertype_list, 7032 ETHERTYPE_PAUSE_FRAMES, 7033 ICE_FLTR_TX, ICE_DROP_PACKET); 7034 if (err) 7035 goto free_ethertype_list; 7036 } 7037 7038 /* Configure switch to drop LLDP frames coming from any VSI */ 7039 if (sc->enable_tx_lldp_filter) { 7040 err = ice_add_ethertype_to_list(vsi, ðertype_list, 7041 ETHERTYPE_LLDP_FRAMES, 7042 ICE_FLTR_TX, ICE_DROP_PACKET); 7043 if (err) 7044 goto free_ethertype_list; 7045 } 7046 7047 status = ice_add_eth_mac(hw, ðertype_list); 7048 if (status) { 7049 device_printf(dev, 7050 "Failed to add Tx Ethertype filters, err %s aq_err %s\n", 7051 ice_status_str(status), 7052 ice_aq_str(hw->adminq.sq_last_status)); 7053 err = (EIO); 7054 } 7055 7056 free_ethertype_list: 7057 ice_free_fltr_list(ðertype_list); 7058 return err; 7059 } 7060 7061 /** 7062 * ice_add_rx_lldp_filter - add ethertype filter for Rx LLDP frames 7063 * @sc: the device private structure 7064 * 7065 * Add a switch ethertype filter which forwards the LLDP frames to the main PF 7066 * VSI. Called when the fw_lldp_agent is disabled, to allow the LLDP frames to 7067 * be forwarded to the stack. 7068 */ 7069 static void 7070 ice_add_rx_lldp_filter(struct ice_softc *sc) 7071 { 7072 struct ice_list_head ethertype_list; 7073 struct ice_vsi *vsi = &sc->pf_vsi; 7074 struct ice_hw *hw = &sc->hw; 7075 device_t dev = sc->dev; 7076 enum ice_status status; 7077 int err; 7078 u16 vsi_num; 7079 7080 /* 7081 * If FW is new enough, use a direct AQ command to perform the filter 7082 * addition. 7083 */ 7084 if (ice_fw_supports_lldp_fltr_ctrl(hw)) { 7085 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); 7086 status = ice_lldp_fltr_add_remove(hw, vsi_num, true); 7087 if (status) { 7088 device_printf(dev, 7089 "Failed to add Rx LLDP filter, err %s aq_err %s\n", 7090 ice_status_str(status), 7091 ice_aq_str(hw->adminq.sq_last_status)); 7092 } else 7093 ice_set_state(&sc->state, 7094 ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER); 7095 return; 7096 } 7097 7098 INIT_LIST_HEAD(ðertype_list); 7099 7100 /* Forward Rx LLDP frames to the stack */ 7101 err = ice_add_ethertype_to_list(vsi, ðertype_list, 7102 ETHERTYPE_LLDP_FRAMES, 7103 ICE_FLTR_RX, ICE_FWD_TO_VSI); 7104 if (err) { 7105 device_printf(dev, 7106 "Failed to add Rx LLDP filter, err %s\n", 7107 ice_err_str(err)); 7108 goto free_ethertype_list; 7109 } 7110 7111 status = ice_add_eth_mac(hw, ðertype_list); 7112 if (status && status != ICE_ERR_ALREADY_EXISTS) { 7113 device_printf(dev, 7114 "Failed to add Rx LLDP filter, err %s aq_err %s\n", 7115 ice_status_str(status), 7116 ice_aq_str(hw->adminq.sq_last_status)); 7117 } else { 7118 /* 7119 * If status == ICE_ERR_ALREADY_EXISTS, we won't treat an 7120 * already existing filter as an error case. 
7121 */ 7122 ice_set_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER); 7123 } 7124 7125 free_ethertype_list: 7126 ice_free_fltr_list(ðertype_list); 7127 } 7128 7129 /** 7130 * ice_del_rx_lldp_filter - Remove ethertype filter for Rx LLDP frames 7131 * @sc: the device private structure 7132 * 7133 * Remove the switch filter forwarding LLDP frames to the main PF VSI, called 7134 * when the firmware LLDP agent is enabled, to stop routing LLDP frames to the 7135 * stack. 7136 */ 7137 static void 7138 ice_del_rx_lldp_filter(struct ice_softc *sc) 7139 { 7140 struct ice_list_head ethertype_list; 7141 struct ice_vsi *vsi = &sc->pf_vsi; 7142 struct ice_hw *hw = &sc->hw; 7143 device_t dev = sc->dev; 7144 enum ice_status status; 7145 int err; 7146 u16 vsi_num; 7147 7148 /* 7149 * Only in the scenario where the driver added the filter during 7150 * this session (while the driver was loaded) would we be able to 7151 * delete this filter. 7152 */ 7153 if (!ice_test_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER)) 7154 return; 7155 7156 /* 7157 * If FW is new enough, use a direct AQ command to perform the filter 7158 * removal. 7159 */ 7160 if (ice_fw_supports_lldp_fltr_ctrl(hw)) { 7161 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); 7162 status = ice_lldp_fltr_add_remove(hw, vsi_num, false); 7163 if (status) { 7164 device_printf(dev, 7165 "Failed to remove Rx LLDP filter, err %s aq_err %s\n", 7166 ice_status_str(status), 7167 ice_aq_str(hw->adminq.sq_last_status)); 7168 } 7169 return; 7170 } 7171 7172 INIT_LIST_HEAD(ðertype_list); 7173 7174 /* Remove filter forwarding Rx LLDP frames to the stack */ 7175 err = ice_add_ethertype_to_list(vsi, ðertype_list, 7176 ETHERTYPE_LLDP_FRAMES, 7177 ICE_FLTR_RX, ICE_FWD_TO_VSI); 7178 if (err) { 7179 device_printf(dev, 7180 "Failed to remove Rx LLDP filter, err %s\n", 7181 ice_err_str(err)); 7182 goto free_ethertype_list; 7183 } 7184 7185 status = ice_remove_eth_mac(hw, ðertype_list); 7186 if (status == ICE_ERR_DOES_NOT_EXIST) { 7187 ; /* Don't complain if we try to remove a filter that doesn't exist */ 7188 } else if (status) { 7189 device_printf(dev, 7190 "Failed to remove Rx LLDP filter, err %s aq_err %s\n", 7191 ice_status_str(status), 7192 ice_aq_str(hw->adminq.sq_last_status)); 7193 } 7194 7195 free_ethertype_list: 7196 ice_free_fltr_list(ðertype_list); 7197 } 7198 7199 /** 7200 * ice_init_link_configuration -- Setup link in different ways depending 7201 * on whether media is available or not. 7202 * @sc: device private structure 7203 * 7204 * Called at the end of the attach process to either set default link 7205 * parameters if there is media available, or force HW link down and 7206 * set a state bit if there is no media. 7207 */ 7208 void 7209 ice_init_link_configuration(struct ice_softc *sc) 7210 { 7211 struct ice_port_info *pi = sc->hw.port_info; 7212 struct ice_hw *hw = &sc->hw; 7213 device_t dev = sc->dev; 7214 enum ice_status status; 7215 7216 pi->phy.get_link_info = true; 7217 status = ice_get_link_status(pi, &sc->link_up); 7218 if (status != ICE_SUCCESS) { 7219 device_printf(dev, 7220 "%s: ice_get_link_status failed; status %s, aq_err %s\n", 7221 __func__, ice_status_str(status), 7222 ice_aq_str(hw->adminq.sq_last_status)); 7223 return; 7224 } 7225 7226 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 7227 ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA); 7228 /* Apply default link settings */ 7229 ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC); 7230 } else { 7231 /* Set link down, and poll for media available in timer. 
This prevents the 7232 * driver from receiving spurious link-related events. 7233 */ 7234 ice_set_state(&sc->state, ICE_STATE_NO_MEDIA); 7235 status = ice_aq_set_link_restart_an(pi, false, NULL); 7236 if (status != ICE_SUCCESS) 7237 device_printf(dev, 7238 "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", 7239 __func__, ice_status_str(status), 7240 ice_aq_str(hw->adminq.sq_last_status)); 7241 } 7242 } 7243 7244 /** 7245 * ice_apply_saved_phy_req_to_cfg -- Write saved user PHY settings to cfg data 7246 * @sc: device private structure 7247 * @cfg: new PHY config data to be modified 7248 * 7249 * Applies user settings for advertised speeds to the PHY type fields in the 7250 * supplied PHY config struct. It uses the data from pcaps to check if the 7251 * saved settings are invalid and uses the pcaps data instead if they are 7252 * invalid. 7253 */ 7254 static int 7255 ice_apply_saved_phy_req_to_cfg(struct ice_softc *sc, 7256 struct ice_aqc_set_phy_cfg_data *cfg) 7257 { 7258 struct ice_phy_data phy_data = { 0 }; 7259 struct ice_port_info *pi = sc->hw.port_info; 7260 u64 phy_low = 0, phy_high = 0; 7261 u16 link_speeds; 7262 int ret; 7263 7264 link_speeds = pi->phy.curr_user_speed_req; 7265 7266 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2)) { 7267 memset(&phy_data, 0, sizeof(phy_data)); 7268 phy_data.report_mode = ICE_AQC_REPORT_DFLT_CFG; 7269 phy_data.user_speeds_orig = link_speeds; 7270 ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); 7271 if (ret != 0) { 7272 /* Error message already printed within function */ 7273 return (ret); 7274 } 7275 phy_low = phy_data.phy_low_intr; 7276 phy_high = phy_data.phy_high_intr; 7277 7278 if (link_speeds == 0 || phy_data.user_speeds_intr) 7279 goto finalize_link_speed; 7280 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { 7281 memset(&phy_data, 0, sizeof(phy_data)); 7282 phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA; 7283 phy_data.user_speeds_orig = link_speeds; 7284 ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); 7285 if (ret != 0) { 7286 /* Error message already printed within function */ 7287 return (ret); 7288 } 7289 phy_low = phy_data.phy_low_intr; 7290 phy_high = phy_data.phy_high_intr; 7291 7292 if (!phy_data.user_speeds_intr) { 7293 phy_low = phy_data.phy_low_orig; 7294 phy_high = phy_data.phy_high_orig; 7295 } 7296 goto finalize_link_speed; 7297 } 7298 /* If we're here, then it means the benefits of Version 2 7299 * link management aren't utilized. We fall through to 7300 * handling Strict Link Mode the same as Version 1 link 7301 * management. 7302 */ 7303 } 7304 7305 memset(&phy_data, 0, sizeof(phy_data)); 7306 if ((link_speeds == 0) && 7307 (sc->ldo_tlv.phy_type_low || sc->ldo_tlv.phy_type_high)) 7308 phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA; 7309 else 7310 phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA; 7311 phy_data.user_speeds_orig = link_speeds; 7312 ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); 7313 if (ret != 0) { 7314 /* Error message already printed within function */ 7315 return (ret); 7316 } 7317 phy_low = phy_data.phy_low_intr; 7318 phy_high = phy_data.phy_high_intr; 7319 7320 if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { 7321 if (phy_low == 0 && phy_high == 0) { 7322 device_printf(sc->dev, 7323 "The selected speed is not supported by the current media. 
Please select a link speed that is supported by the current media.\n"); 7324 return (EINVAL); 7325 } 7326 } else { 7327 if (link_speeds == 0) { 7328 if (sc->ldo_tlv.phy_type_low & phy_low || 7329 sc->ldo_tlv.phy_type_high & phy_high) { 7330 phy_low &= sc->ldo_tlv.phy_type_low; 7331 phy_high &= sc->ldo_tlv.phy_type_high; 7332 } 7333 } else if (phy_low == 0 && phy_high == 0) { 7334 memset(&phy_data, 0, sizeof(phy_data)); 7335 phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA; 7336 phy_data.user_speeds_orig = link_speeds; 7337 ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); 7338 if (ret != 0) { 7339 /* Error message already printed within function */ 7340 return (ret); 7341 } 7342 phy_low = phy_data.phy_low_intr; 7343 phy_high = phy_data.phy_high_intr; 7344 7345 if (!phy_data.user_speeds_intr) { 7346 phy_low = phy_data.phy_low_orig; 7347 phy_high = phy_data.phy_high_orig; 7348 } 7349 } 7350 } 7351 7352 finalize_link_speed: 7353 7354 /* Cache new user settings for speeds */ 7355 pi->phy.curr_user_speed_req = phy_data.user_speeds_intr; 7356 cfg->phy_type_low = htole64(phy_low); 7357 cfg->phy_type_high = htole64(phy_high); 7358 7359 return (ret); 7360 } 7361 7362 /** 7363 * ice_apply_saved_fec_req_to_cfg -- Write saved user FEC mode to cfg data 7364 * @sc: device private structure 7365 * @cfg: new PHY config data to be modified 7366 * 7367 * Applies user setting for FEC mode to PHY config struct. It uses the data 7368 * from pcaps to check if the saved settings are invalid and uses the pcaps 7369 * data instead if they are invalid. 7370 */ 7371 static int 7372 ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc, 7373 struct ice_aqc_set_phy_cfg_data *cfg) 7374 { 7375 struct ice_port_info *pi = sc->hw.port_info; 7376 enum ice_status status; 7377 7378 cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; 7379 status = ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); 7380 if (status) 7381 return (EIO); 7382 7383 return (0); 7384 } 7385 7386 /** 7387 * ice_apply_saved_fc_req_to_cfg -- Write saved user flow control mode to cfg data 7388 * @pi: port info struct 7389 * @cfg: new PHY config data to be modified 7390 * 7391 * Applies user setting for flow control mode to PHY config struct. There are 7392 * no invalid flow control mode settings; if there are, then this function 7393 * treats them like "ICE_FC_NONE". 7394 */ 7395 static void 7396 ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, 7397 struct ice_aqc_set_phy_cfg_data *cfg) 7398 { 7399 cfg->caps &= ~(ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | 7400 ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY); 7401 7402 switch (pi->phy.curr_user_fc_req) { 7403 case ICE_FC_FULL: 7404 cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | 7405 ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; 7406 break; 7407 case ICE_FC_RX_PAUSE: 7408 cfg->caps |= ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; 7409 break; 7410 case ICE_FC_TX_PAUSE: 7411 cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY; 7412 break; 7413 default: 7414 /* ICE_FC_NONE */ 7415 break; 7416 } 7417 } 7418 7419 /** 7420 * ice_apply_saved_phy_cfg -- Re-apply user PHY config settings 7421 * @sc: device private structure 7422 * @settings: which settings to apply 7423 * 7424 * Applies user settings for advertised speeds, FEC mode, and flow 7425 * control mode to a PHY config struct; it uses the data from pcaps 7426 * to check if the saved settings are invalid and uses the pcaps 7427 * data instead if they are invalid. 
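* The settings bitmap is built from the ICE_APPLY_LS, ICE_APPLY_FEC, and ICE_APPLY_FC flags, with ICE_APPLY_LS_FEC_FC covering all three.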
7428 * 7429 * For things like sysctls where only one setting needs to be 7430 * updated, the bitmap allows the caller to specify which setting 7431 * to update. 7432 */ 7433 int 7434 ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings) 7435 { 7436 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 7437 struct ice_port_info *pi = sc->hw.port_info; 7438 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 7439 struct ice_hw *hw = &sc->hw; 7440 device_t dev = sc->dev; 7441 u64 phy_low, phy_high; 7442 enum ice_status status; 7443 enum ice_fec_mode dflt_fec_mode; 7444 enum ice_fc_mode dflt_fc_mode; 7445 u16 dflt_user_speed; 7446 7447 if (!settings || settings > ICE_APPLY_LS_FEC_FC) { 7448 ice_debug(hw, ICE_DBG_LINK, "Settings out-of-bounds: %u\n", 7449 settings); 7450 } 7451 7452 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 7453 &pcaps, NULL); 7454 if (status != ICE_SUCCESS) { 7455 device_printf(dev, 7456 "%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n", 7457 __func__, ice_status_str(status), 7458 ice_aq_str(hw->adminq.sq_last_status)); 7459 return (EIO); 7460 } 7461 7462 phy_low = le64toh(pcaps.phy_type_low); 7463 phy_high = le64toh(pcaps.phy_type_high); 7464 7465 /* Save off initial config parameters */ 7466 dflt_user_speed = ice_aq_phy_types_to_link_speeds(phy_low, phy_high); 7467 dflt_fec_mode = ice_caps_to_fec_mode(pcaps.caps, pcaps.link_fec_options); 7468 dflt_fc_mode = ice_caps_to_fc_mode(pcaps.caps); 7469 7470 /* Setup new PHY config */ 7471 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 7472 7473 /* On error, restore active configuration values */ 7474 if ((settings & ICE_APPLY_LS) && 7475 ice_apply_saved_phy_req_to_cfg(sc, &cfg)) { 7476 pi->phy.curr_user_speed_req = dflt_user_speed; 7477 cfg.phy_type_low = pcaps.phy_type_low; 7478 cfg.phy_type_high = pcaps.phy_type_high; 7479 } 7480 if ((settings & ICE_APPLY_FEC) && 7481 ice_apply_saved_fec_req_to_cfg(sc, &cfg)) { 7482 pi->phy.curr_user_fec_req = dflt_fec_mode; 7483 } 7484 if (settings & ICE_APPLY_FC) { 7485 /* No real error indicators for this process, 7486 * so we'll just have to assume it works. */ 7487 ice_apply_saved_fc_req_to_cfg(pi, &cfg); 7488 } 7489 7490 /* Enable link and re-negotiate it */ 7491 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 7492 7493 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 7494 if (status != ICE_SUCCESS) { 7495 /* Don't indicate failure if there's no media in the port. 7496 * The settings have been saved and will apply when media 7497 * is inserted. 7498 */ 7499 if ((status == ICE_ERR_AQ_ERROR) && 7500 (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)) { 7501 device_printf(dev, 7502 "%s: Setting will be applied when media is inserted\n", 7503 __func__); 7504 return (0); 7505 } else { 7506 device_printf(dev, 7507 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 7508 __func__, ice_status_str(status), 7509 ice_aq_str(hw->adminq.sq_last_status)); 7510 return (EIO); 7511 } 7512 } 7513 7514 return (0); 7515 } 7516 7517 /** 7518 * ice_print_ldo_tlv - Print out LDO TLV information 7519 * @sc: device private structure 7520 * @tlv: LDO TLV information from the adapter NVM 7521 * 7522 * Dump out the information in tlv to the kernel message buffer; intended for 7523 * debugging purposes. 
7524 */ 7525 static void 7526 ice_print_ldo_tlv(struct ice_softc *sc, struct ice_link_default_override_tlv *tlv) 7527 { 7528 device_t dev = sc->dev; 7529 7530 device_printf(dev, "TLV: -options 0x%02x\n", tlv->options); 7531 device_printf(dev, " -phy_config 0x%02x\n", tlv->phy_config); 7532 device_printf(dev, " -fec_options 0x%02x\n", tlv->fec_options); 7533 device_printf(dev, " -phy_high 0x%016llx\n", 7534 (unsigned long long)tlv->phy_type_high); 7535 device_printf(dev, " -phy_low 0x%016llx\n", 7536 (unsigned long long)tlv->phy_type_low); 7537 } 7538 7539 /** 7540 * ice_set_link_management_mode -- Strict or lenient link management 7541 * @sc: device private structure 7542 * 7543 * Some NVMs give the adapter the option to advertise a superset of link 7544 * configurations. This checks to see if that option is enabled. 7545 * Further, the NVM could also provide a specific set of configurations 7546 * to try; these are cached in the driver's private structure if they 7547 * are available. 7548 */ 7549 void 7550 ice_set_link_management_mode(struct ice_softc *sc) 7551 { 7552 struct ice_port_info *pi = sc->hw.port_info; 7553 device_t dev = sc->dev; 7554 struct ice_link_default_override_tlv tlv = { 0 }; 7555 enum ice_status status; 7556 7557 /* Port must be in strict mode if FW version is below a certain 7558 * version. (i.e. Don't set lenient mode features) 7559 */ 7560 if (!(ice_fw_supports_link_override(&sc->hw))) 7561 return; 7562 7563 status = ice_get_link_default_override(&tlv, pi); 7564 if (status != ICE_SUCCESS) { 7565 device_printf(dev, 7566 "%s: ice_get_link_default_override failed; status %s, aq_err %s\n", 7567 __func__, ice_status_str(status), 7568 ice_aq_str(sc->hw.adminq.sq_last_status)); 7569 return; 7570 } 7571 7572 if (sc->hw.debug_mask & ICE_DBG_LINK) 7573 ice_print_ldo_tlv(sc, &tlv); 7574 7575 /* Set lenient link mode */ 7576 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LENIENT_LINK_MODE) && 7577 (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE))) 7578 ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_en); 7579 7580 /* FW supports reporting a default configuration */ 7581 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LINK_MGMT_VER_2) && 7582 ice_fw_supports_report_dflt_cfg(&sc->hw)) { 7583 ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_en); 7584 /* Knowing we're at a high enough firmware revision to 7585 * support this link management configuration, we don't 7586 * need to check/support earlier versions. 7587 */ 7588 return; 7589 } 7590 7591 /* Default overrides only work if in lenient link mode */ 7592 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LINK_MGMT_VER_1) && 7593 ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE) && 7594 (tlv.options & ICE_LINK_OVERRIDE_EN)) 7595 ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_en); 7596 7597 /* Cache the LDO TLV structure in the driver, since it 7598 * won't change during the driver's lifetime. 7599 */ 7600 sc->ldo_tlv = tlv; 7601 } 7602 7603 /** 7604 * ice_init_saved_phy_cfg -- Set cached user PHY cfg settings with NVM defaults 7605 * @sc: device private structure 7606 * 7607 * This should be called before the tunables for these link settings 7608 * (e.g. advertise_speed) are added -- so that these defaults don't overwrite 7609 * the cached values that the sysctl handlers will write. 7610 * 7611 * This also needs to be called before ice_init_link_configuration, to ensure 7612 * that there are sane values that can be written if there is media available 7613 * in the port. 
7614 */ 7615 void 7616 ice_init_saved_phy_cfg(struct ice_softc *sc) 7617 { 7618 struct ice_port_info *pi = sc->hw.port_info; 7619 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 7620 struct ice_hw *hw = &sc->hw; 7621 device_t dev = sc->dev; 7622 enum ice_status status; 7623 u64 phy_low, phy_high; 7624 u8 report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA; 7625 7626 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2)) 7627 report_mode = ICE_AQC_REPORT_DFLT_CFG; 7628 status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL); 7629 if (status != ICE_SUCCESS) { 7630 device_printf(dev, 7631 "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", 7632 __func__, 7633 report_mode == ICE_AQC_REPORT_DFLT_CFG ? "DFLT" : "w/MEDIA", 7634 ice_status_str(status), 7635 ice_aq_str(hw->adminq.sq_last_status)); 7636 return; 7637 } 7638 7639 phy_low = le64toh(pcaps.phy_type_low); 7640 phy_high = le64toh(pcaps.phy_type_high); 7641 7642 /* Save off initial config parameters */ 7643 pi->phy.curr_user_speed_req = 7644 ice_aq_phy_types_to_link_speeds(phy_low, phy_high); 7645 pi->phy.curr_user_fec_req = ice_caps_to_fec_mode(pcaps.caps, 7646 pcaps.link_fec_options); 7647 pi->phy.curr_user_fc_req = ice_caps_to_fc_mode(pcaps.caps); 7648 } 7649 7650 /** 7651 * ice_module_init - Driver callback to handle module load 7652 * 7653 * Callback for handling module load events. This function should initialize 7654 * any data structures that are used for the life of the device driver. 7655 */ 7656 static int 7657 ice_module_init(void) 7658 { 7659 return (0); 7660 } 7661 7662 /** 7663 * ice_module_exit - Driver callback to handle module exit 7664 * 7665 * Callback for handling module unload events. This function should release 7666 * any resources initialized during ice_module_init. 7667 * 7668 * If this function returns non-zero, the module will not be unloaded. It 7669 * should only return such a value if the module cannot be unloaded at all, 7670 * such as due to outstanding memory references that cannot be revoked. 7671 */ 7672 static int 7673 ice_module_exit(void) 7674 { 7675 return (0); 7676 } 7677 7678 /** 7679 * ice_module_event_handler - Callback for module events 7680 * @mod: unused module_t parameter 7681 * @what: the event requested 7682 * @arg: unused event argument 7683 * 7684 * Callback used to handle module events from the stack. Used to allow the 7685 * driver to define custom behavior that should happen at module load and 7686 * unload. 7687 */ 7688 int 7689 ice_module_event_handler(module_t __unused mod, int what, void __unused *arg) 7690 { 7691 switch (what) { 7692 case MOD_LOAD: 7693 return ice_module_init(); 7694 case MOD_UNLOAD: 7695 return ice_module_exit(); 7696 default: 7697 /* TODO: do we need to handle MOD_QUIESCE and MOD_SHUTDOWN? */ 7698 return (EOPNOTSUPP); 7699 } 7700 } 7701 7702 /** 7703 * ice_handle_nvm_access_ioctl - Handle an NVM access ioctl request 7704 * @sc: the device private softc 7705 * @ifd: ifdrv ioctl request pointer 7706 */ 7707 int 7708 ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd) 7709 { 7710 union ice_nvm_access_data *data; 7711 struct ice_nvm_access_cmd *cmd; 7712 size_t ifd_len = ifd->ifd_len, malloc_len; 7713 struct ice_hw *hw = &sc->hw; 7714 device_t dev = sc->dev; 7715 enum ice_status status; 7716 u8 *nvm_buffer; 7717 int err; 7718 7719 /* 7720 * ifioctl forwards SIOCxDRVSPEC to iflib without performing 7721 * a privilege check. In turn, iflib forwards the ioctl to the driver 7722 * without performing a privilege check. 
Perform one here to ensure 7723 * that non-privileged threads cannot access this interface. 7724 */ 7725 err = priv_check(curthread, PRIV_DRIVER); 7726 if (err) 7727 return (err); 7728 7729 if (ifd_len < sizeof(struct ice_nvm_access_cmd)) { 7730 device_printf(dev, "%s: ifdrv length is too small. Got %zu, but expected %zu\n", 7731 __func__, ifd_len, sizeof(struct ice_nvm_access_cmd)); 7732 return (EINVAL); 7733 } 7734 7735 if (ifd->ifd_data == NULL) { 7736 device_printf(dev, "%s: ifd data buffer not present.\n", 7737 __func__); 7738 return (EINVAL); 7739 } 7740 7741 /* 7742 * If everything works correctly, ice_handle_nvm_access should not 7743 * modify data past the size of the ioctl length. However, it could 7744 * lead to memory corruption if it did. Make sure to allocate at least 7745 * enough space for the command and data regardless. This 7746 * ensures that any access to the data union will not access invalid 7747 * memory. 7748 */ 7749 malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd)); 7750 7751 nvm_buffer = (u8 *)malloc(malloc_len, M_ICE, M_ZERO | M_WAITOK); 7752 if (!nvm_buffer) 7753 return (ENOMEM); 7754 7755 /* Copy the NVM access command and data in from user space */ 7756 /* coverity[tainted_data_argument] */ 7757 err = copyin(ifd->ifd_data, nvm_buffer, ifd_len); 7758 if (err) { 7759 device_printf(dev, "%s: Copying request from user space failed, err %s\n", 7760 __func__, ice_err_str(err)); 7761 goto cleanup_free_nvm_buffer; 7762 } 7763 7764 /* 7765 * The NVM command structure is immediately followed by data which 7766 * varies in size based on the command. 7767 */ 7768 cmd = (struct ice_nvm_access_cmd *)nvm_buffer; 7769 data = (union ice_nvm_access_data *)(nvm_buffer + sizeof(struct ice_nvm_access_cmd)); 7770 7771 /* Handle the NVM access request */ 7772 status = ice_handle_nvm_access(hw, cmd, data); 7773 if (status) 7774 ice_debug(hw, ICE_DBG_NVM, 7775 "NVM access request failed, err %s\n", 7776 ice_status_str(status)); 7777 7778 /* Copy the possibly modified contents of the handled request out */ 7779 err = copyout(nvm_buffer, ifd->ifd_data, ifd_len); 7780 if (err) { 7781 device_printf(dev, "%s: Copying response back to user space failed, err %s\n", 7782 __func__, ice_err_str(err)); 7783 goto cleanup_free_nvm_buffer; 7784 } 7785 7786 /* Convert private status to an error code for proper ioctl response */ 7787 switch (status) { 7788 case ICE_SUCCESS: 7789 err = (0); 7790 break; 7791 case ICE_ERR_NO_MEMORY: 7792 err = (ENOMEM); 7793 break; 7794 case ICE_ERR_OUT_OF_RANGE: 7795 err = (ENOTTY); 7796 break; 7797 case ICE_ERR_PARAM: 7798 default: 7799 err = (EINVAL); 7800 break; 7801 } 7802 7803 cleanup_free_nvm_buffer: 7804 free(nvm_buffer, M_ICE); 7805 return err; 7806 } 7807 7808 /** 7809 * ice_read_sff_eeprom - Read data from SFF eeprom 7810 * @sc: device softc 7811 * @dev_addr: I2C device address (typically 0xA0 or 0xA2) 7812 * @offset: offset into the eeprom 7813 * @data: pointer to data buffer to store read data in 7814 * @length: length to read; max length is 16 7815 * 7816 * Read from the SFF eeprom in the module for this PF's port. For more details 7817 * on the contents of an SFF eeprom, refer to SFF-8472 (SFP), SFF-8636 (QSFP), 7818 * and SFF-8024 (both).
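* For SFP modules, dev_addr 0xA0 selects the serial ID page and 0xA2 the diagnostics page; QSFP modules expose identification and diagnostic data through the lower page at 0xA0.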
7819 */ 7820 int 7821 ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length) 7822 { 7823 struct ice_hw *hw = &sc->hw; 7824 int ret = 0, retries = 0; 7825 enum ice_status status; 7826 7827 if (length > 16) 7828 return (EINVAL); 7829 7830 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) 7831 return (ENOSYS); 7832 7833 if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) 7834 return (ENXIO); 7835 7836 do { 7837 status = ice_aq_sff_eeprom(hw, 0, dev_addr, 7838 offset, 0, 0, data, length, 7839 false, NULL); 7840 if (!status) { 7841 ret = 0; 7842 break; 7843 } 7844 if (status == ICE_ERR_AQ_ERROR && 7845 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 7846 ret = EBUSY; 7847 continue; 7848 } 7849 if (status == ICE_ERR_AQ_ERROR && 7850 hw->adminq.sq_last_status == ICE_AQ_RC_EACCES) { 7851 /* FW says I2C access isn't supported */ 7852 ret = EACCES; 7853 break; 7854 } 7855 if (status == ICE_ERR_AQ_ERROR && 7856 hw->adminq.sq_last_status == ICE_AQ_RC_EPERM) { 7857 device_printf(sc->dev, 7858 "%s: Module pointer location specified in command does not permit the required operation.\n", 7859 __func__); 7860 ret = EPERM; 7861 break; 7862 } else { 7863 device_printf(sc->dev, 7864 "%s: Error reading I2C data: err %s aq_err %s\n", 7865 __func__, ice_status_str(status), 7866 ice_aq_str(hw->adminq.sq_last_status)); 7867 ret = EIO; 7868 break; 7869 } 7870 } while (retries++ < ICE_I2C_MAX_RETRIES); 7871 7872 if (ret == EBUSY) 7873 device_printf(sc->dev, 7874 "%s: Error reading I2C data after %d retries\n", 7875 __func__, ICE_I2C_MAX_RETRIES); 7876 7877 return (ret); 7878 } 7879 7880 /** 7881 * ice_handle_i2c_req - Driver independent I2C request handler 7882 * @sc: device softc 7883 * @req: The I2C parameters to use 7884 * 7885 * Read from the port's I2C eeprom using the parameters from the ioctl. 7886 */ 7887 int 7888 ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req) 7889 { 7890 return ice_read_sff_eeprom(sc, req->dev_addr, req->offset, req->data, req->len); 7891 } 7892 7893 /** 7894 * ice_sysctl_read_i2c_diag_data - Read some module diagnostic data via i2c 7895 * @oidp: sysctl oid structure 7896 * @arg1: pointer to private data structure 7897 * @arg2: unused 7898 * @req: sysctl request pointer 7899 * 7900 * Read 8 bytes of diagnostic data from the SFF eeprom in the (Q)SFP module 7901 * inserted into the port. 
7902 * 7903 * | SFP A2 | QSFP Lower Page 7904 * ------------|---------|---------------- 7905 * Temperature | 96-97 | 22-23 7906 * Vcc | 98-99 | 26-27 7907 * TX power | 102-103 | 34-35..40-41 7908 * RX power | 104-105 | 50-51..56-57 7909 */ 7910 static int 7911 ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 7912 { 7913 struct ice_softc *sc = (struct ice_softc *)arg1; 7914 device_t dev = sc->dev; 7915 struct sbuf *sbuf; 7916 int ret; 7917 u8 data[16]; 7918 7919 UNREFERENCED_PARAMETER(arg2); 7920 UNREFERENCED_PARAMETER(oidp); 7921 7922 if (ice_driver_is_detaching(sc)) 7923 return (ESHUTDOWN); 7924 7925 if (req->oldptr == NULL) { 7926 ret = SYSCTL_OUT(req, 0, 128); 7927 return (ret); 7928 } 7929 7930 ret = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1); 7931 if (ret) 7932 return (ret); 7933 7934 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 7935 if (data[0] == 0x3) { 7936 /* 7937 * Check for: 7938 * - Internally calibrated data 7939 * - Diagnostic monitoring is implemented 7940 */ 7941 ice_read_sff_eeprom(sc, 0xA0, 92, data, 1); 7942 if (!(data[0] & 0x60)) { 7943 device_printf(dev, "Module doesn't support diagnostics: 0xA0[92] = %02X\n", data[0]); 7944 return (ENODEV); 7945 } 7946 7947 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7948 7949 ice_read_sff_eeprom(sc, 0xA2, 96, data, 4); 7950 for (int i = 0; i < 4; i++) 7951 sbuf_printf(sbuf, "%02X ", data[i]); 7952 7953 ice_read_sff_eeprom(sc, 0xA2, 102, data, 4); 7954 for (int i = 0; i < 4; i++) 7955 sbuf_printf(sbuf, "%02X ", data[i]); 7956 } else if (data[0] == 0xD || data[0] == 0x11) { 7957 /* 7958 * QSFP+ modules are always internally calibrated, and must indicate 7959 * what types of diagnostic monitoring are implemented 7960 */ 7961 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7962 7963 ice_read_sff_eeprom(sc, 0xA0, 22, data, 2); 7964 for (int i = 0; i < 2; i++) 7965 sbuf_printf(sbuf, "%02X ", data[i]); 7966 7967 ice_read_sff_eeprom(sc, 0xA0, 26, data, 2); 7968 for (int i = 0; i < 2; i++) 7969 sbuf_printf(sbuf, "%02X ", data[i]); 7970 7971 ice_read_sff_eeprom(sc, 0xA0, 34, data, 2); 7972 for (int i = 0; i < 2; i++) 7973 sbuf_printf(sbuf, "%02X ", data[i]); 7974 7975 ice_read_sff_eeprom(sc, 0xA0, 50, data, 2); 7976 for (int i = 0; i < 2; i++) 7977 sbuf_printf(sbuf, "%02X ", data[i]); 7978 } else { 7979 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", data[0]); 7980 return (ENODEV); 7981 } 7982 7983 sbuf_finish(sbuf); 7984 sbuf_delete(sbuf); 7985 7986 return (0); 7987 } 7988 7989 /** 7990 * ice_alloc_intr_tracking - Setup interrupt tracking structures 7991 * @sc: device softc structure 7992 * 7993 * Sets up the resource manager for keeping track of interrupt allocations, 7994 * and initializes the tracking maps for the PF's interrupt allocations. 7995 * 7996 * Unlike the scheme for queues, this is done in one step since both the 7997 * manager and the maps both have the same lifetime. 7998 * 7999 * @returns 0 on success, or an error code on failure. 
8000 */ 8001 int 8002 ice_alloc_intr_tracking(struct ice_softc *sc) 8003 { 8004 struct ice_hw *hw = &sc->hw; 8005 device_t dev = sc->dev; 8006 int err; 8007 8008 /* Initialize the interrupt allocation manager */ 8009 err = ice_resmgr_init_contig_only(&sc->imgr, 8010 hw->func_caps.common_cap.num_msix_vectors); 8011 if (err) { 8012 device_printf(dev, "Unable to initialize PF interrupt manager: %s\n", 8013 ice_err_str(err)); 8014 return (err); 8015 } 8016 8017 /* Allocate PF interrupt mapping storage */ 8018 if (!(sc->pf_imap = 8019 (u16 *)malloc(sizeof(u16) * hw->func_caps.common_cap.num_msix_vectors, 8020 M_ICE, M_NOWAIT))) { 8021 device_printf(dev, "Unable to allocate PF imap memory\n"); 8022 err = ENOMEM; 8023 goto free_imgr; 8024 } 8025 for (u32 i = 0; i < hw->func_caps.common_cap.num_msix_vectors; i++) { 8026 sc->pf_imap[i] = ICE_INVALID_RES_IDX; 8027 } 8028 8029 return (0); 8030 8031 free_imgr: 8032 ice_resmgr_destroy(&sc->imgr); 8033 return (err); 8034 } 8035 8036 /** 8037 * ice_free_intr_tracking - Free PF interrupt tracking structures 8038 * @sc: device softc structure 8039 * 8040 * Frees the interrupt resource allocation manager and the PF's owned maps. 8041 * 8042 * VF maps are released when the owning VFs are destroyed, which should always 8043 * happen before this function is called. 8044 */ 8045 void 8046 ice_free_intr_tracking(struct ice_softc *sc) 8047 { 8048 if (sc->pf_imap) { 8049 ice_resmgr_release_map(&sc->imgr, sc->pf_imap, 8050 sc->lan_vectors); 8051 free(sc->pf_imap, M_ICE); 8052 sc->pf_imap = NULL; 8053 } 8054 8055 ice_resmgr_destroy(&sc->imgr); 8056 } 8057 8058 /** 8059 * ice_apply_supported_speed_filter - Mask off unsupported speeds 8060 * @report_speeds: bit-field for the desired link speeds 8061 * 8062 * Given a bitmap of the desired lenient mode link speeds, 8063 * this function will mask off the speeds that are not currently 8064 * supported by the device. 8065 */ 8066 static u16 8067 ice_apply_supported_speed_filter(u16 report_speeds) 8068 { 8069 u16 speed_mask; 8070 8071 /* We won't offer anything lower than 1G for any part, 8072 * but we also won't offer anything under 25G for 100G 8073 * parts or under 10G for 50G parts. 8074 */ 8075 speed_mask = ~((u16)ICE_AQ_LINK_SPEED_1000MB - 1); 8076 if (report_speeds & ICE_AQ_LINK_SPEED_50GB) 8077 speed_mask = ~((u16)ICE_AQ_LINK_SPEED_10GB - 1); 8078 if (report_speeds & ICE_AQ_LINK_SPEED_100GB) 8079 speed_mask = ~((u16)ICE_AQ_LINK_SPEED_25GB - 1); 8080 return (report_speeds & speed_mask); 8081 } 8082 8083 /** 8084 * ice_init_health_events - Enable FW health event reporting 8085 * @sc: device softc 8086 * 8087 * Will try to enable firmware health event reporting, but shouldn't 8088 * cause any grief (to the caller) if this fails.
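* Enables both the PF-specific and the global health status masks; it is a no-op if the ICE_FEATURE_HEALTH_STATUS capability is not present or enable_health_events is not set.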
8089 */ 8090 void 8091 ice_init_health_events(struct ice_softc *sc) 8092 { 8093 enum ice_status status; 8094 u8 health_mask; 8095 8096 if ((!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HEALTH_STATUS)) || 8097 (!sc->enable_health_events)) 8098 return; 8099 8100 health_mask = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK | 8101 ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK; 8102 8103 status = ice_aq_set_health_status_config(&sc->hw, health_mask, NULL); 8104 if (status) 8105 device_printf(sc->dev, 8106 "Failed to enable firmware health events, err %s aq_err %s\n", 8107 ice_status_str(status), 8108 ice_aq_str(sc->hw.adminq.sq_last_status)); 8109 else 8110 ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_en); 8111 } 8112 8113 /** 8114 * ice_print_health_status_string - Print message for given FW health event 8115 * @dev: the PCIe device 8116 * @elem: health status element containing status code 8117 * 8118 * A rather large list of possible health status codes and their associated 8119 * messages. 8120 */ 8121 static void 8122 ice_print_health_status_string(device_t dev, 8123 struct ice_aqc_health_status_elem *elem) 8124 { 8125 u16 status_code = le16toh(elem->health_status_code); 8126 8127 switch (status_code) { 8128 case ICE_AQC_HEALTH_STATUS_INFO_RECOVERY: 8129 device_printf(dev, "The device is in firmware recovery mode.\n"); 8130 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8131 break; 8132 case ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS: 8133 device_printf(dev, "The flash chip cannot be accessed.\n"); 8134 device_printf(dev, "Possible Solution: If issue persists, call customer support.\n"); 8135 break; 8136 case ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH: 8137 device_printf(dev, "NVM authentication failed.\n"); 8138 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8139 break; 8140 case ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH: 8141 device_printf(dev, "Option ROM authentication failed.\n"); 8142 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8143 break; 8144 case ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH: 8145 device_printf(dev, "DDP package failed.\n"); 8146 device_printf(dev, "Possible Solution: Update to latest base driver and DDP package.\n"); 8147 break; 8148 case ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT: 8149 device_printf(dev, "NVM image is incompatible.\n"); 8150 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8151 break; 8152 case ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT: 8153 device_printf(dev, "Option ROM is incompatible.\n"); 8154 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8155 break; 8156 case ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB: 8157 device_printf(dev, "Supplied MIB file is invalid. 
DCB reverted to default configuration.\n"); 8158 device_printf(dev, "Possible Solution: Disable FW-LLDP and check DCBx system configuration.\n"); 8159 break; 8160 case ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT: 8161 device_printf(dev, "An unsupported module was detected.\n"); 8162 device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); 8163 device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); 8164 break; 8165 case ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE: 8166 device_printf(dev, "Module type is not supported.\n"); 8167 device_printf(dev, "Possible Solution: Change or replace the module or cable.\n"); 8168 break; 8169 case ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL: 8170 device_printf(dev, "Module is not qualified.\n"); 8171 device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); 8172 device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); 8173 device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n"); 8174 break; 8175 case ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM: 8176 device_printf(dev, "Device cannot communicate with the module.\n"); 8177 device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); 8178 device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); 8179 device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n"); 8180 break; 8181 case ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT: 8182 device_printf(dev, "Unresolved module conflict.\n"); 8183 device_printf(dev, "Possible Solution 1: Manually set speed/duplex or use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); 8184 device_printf(dev, "Possible Solution 2: If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.\n"); 8185 break; 8186 case ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT: 8187 device_printf(dev, "Module is not present.\n"); 8188 device_printf(dev, "Possible Solution 1: Check that the module is inserted correctly.\n"); 8189 device_printf(dev, "Possible Solution 2: If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.\n"); 8190 break; 8191 case ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED: 8192 device_printf(dev, "Underutilized module.\n"); 8193 device_printf(dev, "Possible Solution 1: Change or replace the module or cable.\n"); 8194 device_printf(dev, "Possible Solution 2: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); 8195 break; 8196 case ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT: 8197 device_printf(dev, "An unsupported module was detected.\n"); 8198 device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); 8199 device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); 8200 device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n"); 8201 break; 8202 case ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG: 8203 device_printf(dev, "Invalid link configuration.\n"); 8204 break; 8205 case ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS: 8206 device_printf(dev, "Port hardware access error.\n"); 8207 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8208 break; 8209 case ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE: 8210 device_printf(dev, "A port is unreachable.\n"); 8211 device_printf(dev, "Possible Solution 1: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); 8212 device_printf(dev, "Possible 
Solution 2: Update to the latest NVM image.\n"); 8213 break; 8214 case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED: 8215 device_printf(dev, "Port speed is limited due to module.\n"); 8216 device_printf(dev, "Possible Solution: Change the module or use Intel(R) Ethernet Port Configuration Tool to configure the port option to match the current module speed.\n"); 8217 break; 8218 case ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT: 8219 device_printf(dev, "A parallel fault was detected.\n"); 8220 device_printf(dev, "Possible Solution: Check link partner connection and configuration.\n"); 8221 break; 8222 case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED: 8223 device_printf(dev, "Port speed is limited by PHY capabilities.\n"); 8224 device_printf(dev, "Possible Solution 1: Change the module to align to port option.\n"); 8225 device_printf(dev, "Possible Solution 2: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); 8226 break; 8227 case ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO: 8228 device_printf(dev, "LOM topology netlist is corrupted.\n"); 8229 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8230 break; 8231 case ICE_AQC_HEALTH_STATUS_ERR_NETLIST: 8232 device_printf(dev, "Unrecoverable netlist error.\n"); 8233 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8234 break; 8235 case ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT: 8236 device_printf(dev, "Port topology conflict.\n"); 8237 device_printf(dev, "Possible Solution 1: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); 8238 device_printf(dev, "Possible Solution 2: Update to the latest NVM image.\n"); 8239 break; 8240 case ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS: 8241 device_printf(dev, "Unrecoverable hardware access error.\n"); 8242 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8243 break; 8244 case ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME: 8245 device_printf(dev, "Unrecoverable runtime error.\n"); 8246 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8247 break; 8248 case ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT: 8249 device_printf(dev, "Link management engine failed to initialize.\n"); 8250 device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); 8251 break; 8252 default: 8253 break; 8254 } 8255 } 8256 8257 /** 8258 * ice_handle_health_status_event - helper function to output health status 8259 * @sc: device softc structure 8260 * @event: event received on a control queue 8261 * 8262 * Prints out the appropriate string based on the given Health Status Event 8263 * code. 
8264 */ 8265 static void 8266 ice_handle_health_status_event(struct ice_softc *sc, 8267 struct ice_rq_event_info *event) 8268 { 8269 struct ice_aqc_health_status_elem *health_info; 8270 u16 status_count; 8271 int i; 8272 8273 if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_HEALTH_STATUS)) 8274 return; 8275 8276 health_info = (struct ice_aqc_health_status_elem *)event->msg_buf; 8277 status_count = le16toh(event->desc.params.get_health_status.health_status_count); 8278 8279 if (status_count > (event->buf_len / sizeof(*health_info))) { 8280 device_printf(sc->dev, "Received a health status event with invalid event count\n"); 8281 return; 8282 } 8283 8284 for (i = 0; i < status_count; i++) { 8285 ice_print_health_status_string(sc->dev, health_info); 8286 health_info++; 8287 } 8288 } 8289 8290 /** 8291 * ice_set_default_local_lldp_mib - Set Local LLDP MIB to default settings 8292 * @sc: device softc structure 8293 * 8294 * This function needs to be called after link up; it makes sure the FW 8295 * has certain PFC/DCB settings. This is intended to work around a FW behavior 8296 * where these settings seem to be cleared on link up. 8297 */ 8298 void 8299 ice_set_default_local_lldp_mib(struct ice_softc *sc) 8300 { 8301 struct ice_dcbx_cfg *dcbcfg; 8302 struct ice_hw *hw = &sc->hw; 8303 struct ice_port_info *pi; 8304 device_t dev = sc->dev; 8305 enum ice_status status; 8306 8307 pi = hw->port_info; 8308 dcbcfg = &pi->qos_cfg.local_dcbx_cfg; 8309 8310 /* This value is only 3 bits; 8 TCs maps to 0 */ 8311 u8 maxtcs = hw->func_caps.common_cap.maxtc & ICE_IEEE_ETS_MAXTC_M; 8312 8313 /** 8314 * Setup the default settings used by the driver for the Set Local 8315 * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no 8316 * PFC). 8317 */ 8318 memset(dcbcfg, 0, sizeof(*dcbcfg)); 8319 dcbcfg->etscfg.willing = 1; 8320 dcbcfg->etscfg.tcbwtable[0] = 100; 8321 dcbcfg->etscfg.maxtcs = maxtcs; 8322 dcbcfg->etsrec.willing = 1; 8323 dcbcfg->etsrec.tcbwtable[0] = 100; 8324 dcbcfg->etsrec.maxtcs = maxtcs; 8325 dcbcfg->pfc.willing = 1; 8326 dcbcfg->pfc.pfccap = maxtcs; 8327 8328 status = ice_set_dcb_cfg(pi); 8329 8330 if (status) 8331 device_printf(dev, 8332 "Error setting Local LLDP MIB: %s aq_err %s\n", 8333 ice_status_str(status), 8334 ice_aq_str(hw->adminq.sq_last_status)); 8335 } 8336
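/*
 * Illustrative sketch, not part of the driver sources: ice_module_event_handler
 * (defined earlier in this file) has the standard modeventhand_t signature, and
 * the actual module registration is expected to be done by the iflib front-end.
 * The generic FreeBSD mechanism it plugs into looks roughly like the following,
 * where "ice_example" is a purely hypothetical module name used only for this
 * sketch:
 *
 *	static moduledata_t ice_example_mod = {
 *		"ice_example",
 *		ice_module_event_handler,
 *		NULL
 *	};
 *	DECLARE_MODULE(ice_example, ice_example_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
 */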