1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2021, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /*$FreeBSD$*/ 32 33 /** 34 * @file ice_lib.c 35 * @brief Generic device setup and sysctl functions 36 * 37 * Library of generic device functions not specific to the networking stack. 38 * 39 * This includes hardware initialization functions, as well as handlers for 40 * many of the device sysctls used to probe driver status or tune specific 41 * behaviors. 42 */ 43 44 #include "ice_lib.h" 45 #include "ice_iflib.h" 46 #include <dev/pci/pcivar.h> 47 #include <dev/pci/pcireg.h> 48 #include <machine/resource.h> 49 #include <net/if_dl.h> 50 #include <sys/firmware.h> 51 #include <sys/priv.h> 52 53 /** 54 * @var M_ICE 55 * @brief main ice driver allocation type 56 * 57 * malloc(9) allocation type used by the majority of memory allocations in the 58 * ice driver. 
59 */ 60 MALLOC_DEFINE(M_ICE, "ice", "Intel(R) 100Gb Network Driver lib allocations"); 61 62 /* 63 * Helper function prototypes 64 */ 65 static int ice_get_next_vsi(struct ice_vsi **all_vsi, int size); 66 static void ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx); 67 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type); 68 static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx); 69 static int ice_setup_tx_ctx(struct ice_tx_queue *txq, 70 struct ice_tlan_ctx *tlan_ctx, u16 pf_q); 71 static int ice_setup_rx_ctx(struct ice_rx_queue *rxq); 72 static int ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg); 73 static void ice_free_fltr_list(struct ice_list_head *list); 74 static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 75 const u8 *addr, enum ice_sw_fwd_act_type action); 76 static void ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, 77 struct ice_ctl_q_info *cq); 78 static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_info *e); 79 static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, 80 struct ice_rq_event_info *event); 81 static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf); 82 static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); 83 static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); 84 static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info); 85 static u_int ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, u_int errors); 86 static void ice_add_debug_tunables(struct ice_softc *sc); 87 static void ice_add_debug_sysctls(struct ice_softc *sc); 88 static void ice_vsi_set_rss_params(struct ice_vsi *vsi); 89 static void ice_get_default_rss_key(u8 *seed); 90 static int ice_set_rss_key(struct ice_vsi *vsi); 91 static int ice_set_rss_lut(struct ice_vsi *vsi); 92 static void ice_set_rss_flow_flds(struct ice_vsi *vsi); 93 static void ice_clean_vsi_rss_cfg(struct ice_vsi *vsi); 94 static const char *ice_aq_speed_to_str(struct ice_port_info *pi); 95 static const char *ice_requested_fec_mode(struct ice_port_info *pi); 96 static const char *ice_negotiated_fec_mode(struct ice_port_info *pi); 97 static const char *ice_autoneg_mode(struct ice_port_info *pi); 98 static const char *ice_flowcontrol_mode(struct ice_port_info *pi); 99 static void ice_print_bus_link_data(device_t dev, struct ice_hw *hw); 100 static void ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status); 101 static uint8_t ice_pcie_bandwidth_check(struct ice_softc *sc); 102 static uint64_t ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed); 103 static int ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width); 104 static uint64_t ice_phy_types_to_max_rate(struct ice_port_info *pi); 105 static void ice_add_sysctls_sw_stats(struct ice_vsi *vsi, 106 struct sysctl_ctx_list *ctx, 107 struct sysctl_oid *parent); 108 static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi, 109 enum ice_vsi_type type, int idx, 110 bool dynamic); 111 static void ice_handle_mib_change_event(struct ice_softc *sc, 112 struct ice_rq_event_info *event); 113 static void 114 ice_handle_lan_overflow_event(struct ice_softc *sc, 115 struct ice_rq_event_info *event); 116 static int ice_add_ethertype_to_list(struct ice_vsi *vsi, 117 struct ice_list_head *list, 118 u16 ethertype, u16 direction, 119 enum ice_sw_fwd_act_type action); 120 static void ice_add_rx_lldp_filter(struct ice_softc *sc); 121 static 
void ice_del_rx_lldp_filter(struct ice_softc *sc); 122 static u16 ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, 123 u64 phy_type_high); 124 static void 125 ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi, 126 struct ice_aqc_get_phy_caps_data *pcaps, 127 struct ice_aqc_set_phy_cfg_data *cfg); 128 static void 129 ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi, 130 struct ice_aqc_get_phy_caps_data *pcaps, 131 struct ice_aqc_set_phy_cfg_data *cfg); 132 static void 133 ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi, 134 struct ice_aqc_get_phy_caps_data *pcaps, 135 struct ice_aqc_set_phy_cfg_data *cfg); 136 static void 137 ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, 138 struct ice_aqc_set_phy_cfg_data *cfg); 139 static void 140 ice_print_ldo_tlv(struct ice_softc *sc, 141 struct ice_link_default_override_tlv *tlv); 142 static void 143 ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, 144 u64 *phy_type_high); 145 static int 146 ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds, 147 u64 *phy_type_low, u64 *phy_type_high); 148 static int 149 ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low, 150 u64 *phy_type_high); 151 static void 152 ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high); 153 static enum ice_status 154 ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high); 155 156 static int ice_module_init(void); 157 static int ice_module_exit(void); 158 159 /* 160 * package version comparison functions 161 */ 162 static bool pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name); 163 static int pkg_ver_compatible(struct ice_pkg_ver *pkg_ver); 164 165 /* 166 * dynamic sysctl handlers 167 */ 168 static int ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS); 169 static int ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS); 170 static int ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS); 171 static int ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS); 172 static int ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS); 173 static int ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS); 174 static int ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS); 175 static int ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS); 176 static int ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS); 177 static int ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS); 178 static int ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS); 179 static int ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS); 180 static int ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS); 181 static int ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS); 182 static int ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS); 183 static int ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS); 184 static int __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, 185 bool is_phy_type_high); 186 static int ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS); 187 static int ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS); 188 static int ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS); 189 static int ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS); 190 static int ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS); 191 static int ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS); 192 static int ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode); 193 static int ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS); 194 static int ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS); 195 static int ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS); 196 static int 
ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS); 197 static int ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); 198 static int ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS); 199 static int ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS); 200 static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS); 201 202 /** 203 * ice_map_bar - Map PCIe BAR memory 204 * @dev: the PCIe device 205 * @bar: the BAR info structure 206 * @bar_num: PCIe BAR number 207 * 208 * Maps the specified PCIe BAR. Stores the mapping data in struct 209 * ice_bar_info. 210 */ 211 int 212 ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num) 213 { 214 if (bar->res != NULL) { 215 device_printf(dev, "PCI BAR%d already mapped\n", bar_num); 216 return (EDOOFUS); 217 } 218 219 bar->rid = PCIR_BAR(bar_num); 220 bar->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar->rid, 221 RF_ACTIVE); 222 if (!bar->res) { 223 device_printf(dev, "PCI BAR%d mapping failed\n", bar_num); 224 return (ENXIO); 225 } 226 227 bar->tag = rman_get_bustag(bar->res); 228 bar->handle = rman_get_bushandle(bar->res); 229 bar->size = rman_get_size(bar->res); 230 231 return (0); 232 } 233 234 /** 235 * ice_free_bar - Free PCIe BAR memory 236 * @dev: the PCIe device 237 * @bar: the BAR info structure 238 * 239 * Frees the specified PCIe BAR, releasing its resources. 240 */ 241 void 242 ice_free_bar(device_t dev, struct ice_bar_info *bar) 243 { 244 if (bar->res != NULL) 245 bus_release_resource(dev, SYS_RES_MEMORY, bar->rid, bar->res); 246 bar->res = NULL; 247 } 248 249 /** 250 * ice_set_ctrlq_len - Configure ctrlq lengths for a device 251 * @hw: the device hardware structure 252 * 253 * Configures the control queues for the given device, setting up the 254 * specified lengths, prior to initializing hardware. 255 */ 256 void 257 ice_set_ctrlq_len(struct ice_hw *hw) 258 { 259 hw->adminq.num_rq_entries = ICE_AQ_LEN; 260 hw->adminq.num_sq_entries = ICE_AQ_LEN; 261 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 262 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 263 264 hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; 265 hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; 266 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 267 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 268 269 } 270 271 /** 272 * ice_get_next_vsi - Get the next available VSI slot 273 * @all_vsi: the VSI list 274 * @size: the size of the VSI list 275 * 276 * Returns the index to the first available VSI slot. Will return size (one 277 * past the last index) if there are no slots available. 278 */ 279 static int 280 ice_get_next_vsi(struct ice_vsi **all_vsi, int size) 281 { 282 int i; 283 284 for (i = 0; i < size; i++) { 285 if (all_vsi[i] == NULL) 286 return i; 287 } 288 289 return size; 290 } 291 292 /** 293 * ice_setup_vsi_common - Common VSI setup for both dynamic and static VSIs 294 * @sc: the device private softc structure 295 * @vsi: the VSI to setup 296 * @type: the VSI type of the new VSI 297 * @idx: the index in the all_vsi array to use 298 * @dynamic: whether this VSI memory was dynamically allocated 299 * 300 * Perform setup for a VSI that is common to both dynamically allocated VSIs 301 * and the static PF VSI which is embedded in the softc structure. 
302 */ 303 static void 304 ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi, 305 enum ice_vsi_type type, int idx, bool dynamic) 306 { 307 /* Store important values in VSI struct */ 308 vsi->type = type; 309 vsi->sc = sc; 310 vsi->idx = idx; 311 sc->all_vsi[idx] = vsi; 312 vsi->dynamic = dynamic; 313 314 /* Setup the VSI tunables now */ 315 ice_add_vsi_tunables(vsi, sc->vsi_sysctls); 316 } 317 318 /** 319 * ice_alloc_vsi - Allocate a dynamic VSI 320 * @sc: device softc structure 321 * @type: VSI type 322 * 323 * Allocates a new dynamic VSI structure and inserts it into the VSI list. 324 */ 325 struct ice_vsi * 326 ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type) 327 { 328 struct ice_vsi *vsi; 329 int idx; 330 331 /* Find an open index for a new VSI to be allocated. If the returned 332 * index is >= the num_available_vsi then it means no slot is 333 * available. 334 */ 335 idx = ice_get_next_vsi(sc->all_vsi, sc->num_available_vsi); 336 if (idx >= sc->num_available_vsi) { 337 device_printf(sc->dev, "No available VSI slots\n"); 338 return NULL; 339 } 340 341 vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_WAITOK|M_ZERO); 342 if (!vsi) { 343 device_printf(sc->dev, "Unable to allocate VSI memory\n"); 344 return NULL; 345 } 346 347 ice_setup_vsi_common(sc, vsi, type, idx, true); 348 349 return vsi; 350 } 351 352 /** 353 * ice_setup_pf_vsi - Setup the PF VSI 354 * @sc: the device private softc 355 * 356 * Setup the PF VSI structure which is embedded as sc->pf_vsi in the device 357 * private softc. Unlike other VSIs, the PF VSI memory is allocated as part of 358 * the softc memory, instead of being dynamically allocated at creation. 359 */ 360 void 361 ice_setup_pf_vsi(struct ice_softc *sc) 362 { 363 ice_setup_vsi_common(sc, &sc->pf_vsi, ICE_VSI_PF, 0, false); 364 } 365 366 /** 367 * ice_alloc_vsi_qmap 368 * @vsi: VSI structure 369 * @max_tx_queues: Number of transmit queues to identify 370 * @max_rx_queues: Number of receive queues to identify 371 * 372 * Allocates a max_[t|r]x_queues array of words for the VSI where each 373 * word contains the index of the queue it represents. In here, all 374 * words are initialized to an index of ICE_INVALID_RES_IDX, indicating 375 * all queues for this VSI are not yet assigned an index and thus, 376 * not ready for use. 377 * 378 * Returns an error code on failure. 
379 */ 380 int 381 ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues, 382 const int max_rx_queues) 383 { 384 struct ice_softc *sc = vsi->sc; 385 int i; 386 387 MPASS(max_tx_queues > 0); 388 MPASS(max_rx_queues > 0); 389 390 /* Allocate Tx queue mapping memory */ 391 if (!(vsi->tx_qmap = 392 (u16 *) malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK))) { 393 device_printf(sc->dev, "Unable to allocate Tx qmap memory\n"); 394 return (ENOMEM); 395 } 396 397 /* Allocate Rx queue mapping memory */ 398 if (!(vsi->rx_qmap = 399 (u16 *) malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK))) { 400 device_printf(sc->dev, "Unable to allocate Rx qmap memory\n"); 401 goto free_tx_qmap; 402 } 403 404 /* Mark every queue map as invalid to start with */ 405 for (i = 0; i < max_tx_queues; i++) { 406 vsi->tx_qmap[i] = ICE_INVALID_RES_IDX; 407 } 408 for (i = 0; i < max_rx_queues; i++) { 409 vsi->rx_qmap[i] = ICE_INVALID_RES_IDX; 410 } 411 412 return 0; 413 414 free_tx_qmap: 415 free(vsi->tx_qmap, M_ICE); 416 vsi->tx_qmap = NULL; 417 418 return (ENOMEM); 419 } 420 421 /** 422 * ice_free_vsi_qmaps - Free the PF qmaps associated with a VSI 423 * @vsi: the VSI private structure 424 * 425 * Frees the PF qmaps associated with the given VSI. Generally this will be 426 * called by ice_release_vsi, but may need to be called during attach cleanup, 427 * depending on when the qmaps were allocated. 428 */ 429 void 430 ice_free_vsi_qmaps(struct ice_vsi *vsi) 431 { 432 struct ice_softc *sc = vsi->sc; 433 434 if (vsi->tx_qmap) { 435 ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, 436 vsi->num_tx_queues); 437 free(vsi->tx_qmap, M_ICE); 438 vsi->tx_qmap = NULL; 439 } 440 441 if (vsi->rx_qmap) { 442 ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap, 443 vsi->num_rx_queues); 444 free(vsi->rx_qmap, M_ICE); 445 vsi->rx_qmap = NULL; 446 } 447 } 448 449 /** 450 * ice_set_default_vsi_ctx - Setup default VSI context parameters 451 * @ctx: the VSI context to initialize 452 * 453 * Initialize and prepare a default VSI context for configuring a new VSI. 
454 */ 455 static void 456 ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx) 457 { 458 u32 table = 0; 459 460 memset(&ctx->info, 0, sizeof(ctx->info)); 461 /* VSI will be allocated from shared pool */ 462 ctx->alloc_from_pool = true; 463 /* Enable source pruning by default */ 464 ctx->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; 465 /* Traffic from VSI can be sent to LAN */ 466 ctx->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 467 /* Allow all packets untagged/tagged */ 468 ctx->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & 469 ICE_AQ_VSI_VLAN_MODE_M) >> 470 ICE_AQ_VSI_VLAN_MODE_S); 471 /* Show VLAN/UP from packets in Rx descriptors */ 472 ctx->info.vlan_flags |= ((ICE_AQ_VSI_VLAN_EMOD_STR_BOTH & 473 ICE_AQ_VSI_VLAN_EMOD_M) >> 474 ICE_AQ_VSI_VLAN_EMOD_S); 475 /* Have 1:1 UP mapping for both ingress/egress tables */ 476 table |= ICE_UP_TABLE_TRANSLATE(0, 0); 477 table |= ICE_UP_TABLE_TRANSLATE(1, 1); 478 table |= ICE_UP_TABLE_TRANSLATE(2, 2); 479 table |= ICE_UP_TABLE_TRANSLATE(3, 3); 480 table |= ICE_UP_TABLE_TRANSLATE(4, 4); 481 table |= ICE_UP_TABLE_TRANSLATE(5, 5); 482 table |= ICE_UP_TABLE_TRANSLATE(6, 6); 483 table |= ICE_UP_TABLE_TRANSLATE(7, 7); 484 ctx->info.ingress_table = CPU_TO_LE32(table); 485 ctx->info.egress_table = CPU_TO_LE32(table); 486 /* Have 1:1 UP mapping for outer to inner UP table */ 487 ctx->info.outer_up_table = CPU_TO_LE32(table); 488 /* No Outer tag support, so outer_tag_flags remains zero */ 489 } 490 491 /** 492 * ice_set_rss_vsi_ctx - Setup VSI context parameters for RSS 493 * @ctx: the VSI context to configure 494 * @type: the VSI type 495 * 496 * Configures the VSI context for RSS, based on the VSI type. 497 */ 498 static void 499 ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type) 500 { 501 u8 lut_type, hash_type; 502 503 switch (type) { 504 case ICE_VSI_PF: 505 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; 506 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 507 break; 508 case ICE_VSI_VF: 509 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 510 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 511 break; 512 default: 513 /* Other VSI types do not support RSS */ 514 return; 515 } 516 517 ctx->info.q_opt_rss = (((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & 518 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | 519 ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & 520 ICE_AQ_VSI_Q_OPT_RSS_HASH_M)); 521 } 522 523 /** 524 * ice_setup_vsi_qmap - Setup the queue mapping for a VSI 525 * @vsi: the VSI to configure 526 * @ctx: the VSI context to configure 527 * 528 * Configures the context for the given VSI, setting up how the firmware 529 * should map the queues for this VSI. 
530 */ 531 static int 532 ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 533 { 534 int pow = 0; 535 u16 qmap; 536 537 MPASS(vsi->rx_qmap != NULL); 538 539 /* TODO: 540 * Handle multiple Traffic Classes 541 * Handle scattered queues (for VFs) 542 */ 543 if (vsi->qmap_type != ICE_RESMGR_ALLOC_CONTIGUOUS) 544 return (EOPNOTSUPP); 545 546 ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG); 547 548 ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]); 549 ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues); 550 551 552 /* Calculate the next power-of-2 of number of queues */ 553 if (vsi->num_rx_queues) 554 pow = flsl(vsi->num_rx_queues - 1); 555 556 /* Assign all the queues to traffic class zero */ 557 qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M; 558 ctx->info.tc_mapping[0] = CPU_TO_LE16(qmap); 559 560 return 0; 561 } 562 563 /** 564 * ice_initialize_vsi - Initialize a VSI for use 565 * @vsi: the vsi to initialize 566 * 567 * Initialize a VSI over the adminq and prepare it for operation. 568 */ 569 int 570 ice_initialize_vsi(struct ice_vsi *vsi) 571 { 572 struct ice_vsi_ctx ctx = { 0 }; 573 struct ice_hw *hw = &vsi->sc->hw; 574 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 575 enum ice_status status; 576 int err; 577 578 /* For now, we only have code supporting PF VSIs */ 579 switch (vsi->type) { 580 case ICE_VSI_PF: 581 ctx.flags = ICE_AQ_VSI_TYPE_PF; 582 break; 583 default: 584 return (ENODEV); 585 } 586 587 ice_set_default_vsi_ctx(&ctx); 588 ice_set_rss_vsi_ctx(&ctx, vsi->type); 589 590 /* XXX: VSIs of other types may need different port info? */ 591 ctx.info.sw_id = hw->port_info->sw_id; 592 593 /* Set some RSS parameters based on the VSI type */ 594 ice_vsi_set_rss_params(vsi); 595 596 /* Initialize the Rx queue mapping for this VSI */ 597 err = ice_setup_vsi_qmap(vsi, &ctx); 598 if (err) { 599 return err; 600 } 601 602 /* (Re-)add VSI to HW VSI handle list */ 603 status = ice_add_vsi(hw, vsi->idx, &ctx, NULL); 604 if (status != 0) { 605 device_printf(vsi->sc->dev, 606 "Add VSI AQ call failed, err %s aq_err %s\n", 607 ice_status_str(status), 608 ice_aq_str(hw->adminq.sq_last_status)); 609 return (EIO); 610 } 611 vsi->info = ctx.info; 612 613 /* TODO: DCB traffic class support? */ 614 max_txqs[0] = vsi->num_tx_queues; 615 616 status = ice_cfg_vsi_lan(hw->port_info, vsi->idx, 617 ICE_DFLT_TRAFFIC_CLASS, max_txqs); 618 if (status) { 619 device_printf(vsi->sc->dev, 620 "Failed VSI lan queue config, err %s aq_err %s\n", 621 ice_status_str(status), 622 ice_aq_str(hw->adminq.sq_last_status)); 623 ice_deinit_vsi(vsi); 624 return (ENODEV); 625 } 626 627 /* Reset VSI stats */ 628 ice_reset_vsi_stats(vsi); 629 630 return 0; 631 } 632 633 /** 634 * ice_deinit_vsi - Tell firmware to release resources for a VSI 635 * @vsi: the VSI to release 636 * 637 * Helper function which requests the firmware to release the hardware 638 * resources associated with a given VSI. 639 */ 640 void 641 ice_deinit_vsi(struct ice_vsi *vsi) 642 { 643 struct ice_vsi_ctx ctx = { 0 }; 644 struct ice_softc *sc = vsi->sc; 645 struct ice_hw *hw = &sc->hw; 646 enum ice_status status; 647 648 /* Assert that the VSI pointer matches in the list */ 649 MPASS(vsi == sc->all_vsi[vsi->idx]); 650 651 ctx.info = vsi->info; 652 653 status = ice_rm_vsi_lan_cfg(hw->port_info, vsi->idx); 654 if (status) { 655 /* 656 * This should only fail if the VSI handle is invalid, or if 657 * any of the nodes have leaf nodes which are still in use. 
658 */ 659 device_printf(sc->dev, 660 "Unable to remove scheduler nodes for VSI %d, err %s\n", 661 vsi->idx, ice_status_str(status)); 662 } 663 664 /* Tell firmware to release the VSI resources */ 665 status = ice_free_vsi(hw, vsi->idx, &ctx, false, NULL); 666 if (status != 0) { 667 device_printf(sc->dev, 668 "Free VSI %u AQ call failed, err %s aq_err %s\n", 669 vsi->idx, ice_status_str(status), 670 ice_aq_str(hw->adminq.sq_last_status)); 671 } 672 } 673 674 /** 675 * ice_release_vsi - Release resources associated with a VSI 676 * @vsi: the VSI to release 677 * 678 * Release software and firmware resources associated with a VSI. Release the 679 * queue managers associated with this VSI. Also free the VSI structure memory 680 * if the VSI was allocated dynamically using ice_alloc_vsi(). 681 */ 682 void 683 ice_release_vsi(struct ice_vsi *vsi) 684 { 685 struct ice_softc *sc = vsi->sc; 686 int idx = vsi->idx; 687 688 /* Assert that the VSI pointer matches in the list */ 689 MPASS(vsi == sc->all_vsi[idx]); 690 691 /* Cleanup RSS configuration */ 692 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) 693 ice_clean_vsi_rss_cfg(vsi); 694 695 ice_del_vsi_sysctl_ctx(vsi); 696 697 ice_deinit_vsi(vsi); 698 699 ice_free_vsi_qmaps(vsi); 700 701 if (vsi->dynamic) { 702 free(sc->all_vsi[idx], M_ICE); 703 } 704 705 sc->all_vsi[idx] = NULL; 706 } 707 708 /** 709 * ice_aq_speed_to_rate - Convert AdminQ speed enum to baudrate 710 * @pi: port info data 711 * 712 * Returns the baudrate value for the current link speed of a given port. 713 */ 714 uint64_t 715 ice_aq_speed_to_rate(struct ice_port_info *pi) 716 { 717 switch (pi->phy.link_info.link_speed) { 718 case ICE_AQ_LINK_SPEED_100GB: 719 return IF_Gbps(100); 720 case ICE_AQ_LINK_SPEED_50GB: 721 return IF_Gbps(50); 722 case ICE_AQ_LINK_SPEED_40GB: 723 return IF_Gbps(40); 724 case ICE_AQ_LINK_SPEED_25GB: 725 return IF_Gbps(25); 726 case ICE_AQ_LINK_SPEED_10GB: 727 return IF_Gbps(10); 728 case ICE_AQ_LINK_SPEED_5GB: 729 return IF_Gbps(5); 730 case ICE_AQ_LINK_SPEED_2500MB: 731 return IF_Mbps(2500); 732 case ICE_AQ_LINK_SPEED_1000MB: 733 return IF_Mbps(1000); 734 case ICE_AQ_LINK_SPEED_100MB: 735 return IF_Mbps(100); 736 case ICE_AQ_LINK_SPEED_10MB: 737 return IF_Mbps(10); 738 case ICE_AQ_LINK_SPEED_UNKNOWN: 739 default: 740 /* return 0 if we don't know the link speed */ 741 return 0; 742 } 743 } 744 745 /** 746 * ice_aq_speed_to_str - Convert AdminQ speed enum to string representation 747 * @pi: port info data 748 * 749 * Returns the string representation of the current link speed for a given 750 * port. 
751 */ 752 static const char * 753 ice_aq_speed_to_str(struct ice_port_info *pi) 754 { 755 switch (pi->phy.link_info.link_speed) { 756 case ICE_AQ_LINK_SPEED_100GB: 757 return "100 Gbps"; 758 case ICE_AQ_LINK_SPEED_50GB: 759 return "50 Gbps"; 760 case ICE_AQ_LINK_SPEED_40GB: 761 return "40 Gbps"; 762 case ICE_AQ_LINK_SPEED_25GB: 763 return "25 Gbps"; 764 case ICE_AQ_LINK_SPEED_20GB: 765 return "20 Gbps"; 766 case ICE_AQ_LINK_SPEED_10GB: 767 return "10 Gbps"; 768 case ICE_AQ_LINK_SPEED_5GB: 769 return "5 Gbps"; 770 case ICE_AQ_LINK_SPEED_2500MB: 771 return "2.5 Gbps"; 772 case ICE_AQ_LINK_SPEED_1000MB: 773 return "1 Gbps"; 774 case ICE_AQ_LINK_SPEED_100MB: 775 return "100 Mbps"; 776 case ICE_AQ_LINK_SPEED_10MB: 777 return "10 Mbps"; 778 case ICE_AQ_LINK_SPEED_UNKNOWN: 779 default: 780 return "Unknown speed"; 781 } 782 } 783 784 /** 785 * ice_get_phy_type_low - Get media associated with phy_type_low 786 * @phy_type_low: the low 64bits of phy_type from the AdminQ 787 * 788 * Given the lower 64bits of the phy_type from the hardware, return the 789 * ifm_active bit associated. Return IFM_UNKNOWN when phy_type_low is unknown. 790 * Note that only one of ice_get_phy_type_low or ice_get_phy_type_high should 791 * be called. If phy_type_low is zero, call ice_phy_type_high. 792 */ 793 int 794 ice_get_phy_type_low(uint64_t phy_type_low) 795 { 796 switch (phy_type_low) { 797 case ICE_PHY_TYPE_LOW_100BASE_TX: 798 return IFM_100_TX; 799 case ICE_PHY_TYPE_LOW_100M_SGMII: 800 return IFM_100_SGMII; 801 case ICE_PHY_TYPE_LOW_1000BASE_T: 802 return IFM_1000_T; 803 case ICE_PHY_TYPE_LOW_1000BASE_SX: 804 return IFM_1000_SX; 805 case ICE_PHY_TYPE_LOW_1000BASE_LX: 806 return IFM_1000_LX; 807 case ICE_PHY_TYPE_LOW_1000BASE_KX: 808 return IFM_1000_KX; 809 case ICE_PHY_TYPE_LOW_1G_SGMII: 810 return IFM_1000_SGMII; 811 case ICE_PHY_TYPE_LOW_2500BASE_T: 812 return IFM_2500_T; 813 case ICE_PHY_TYPE_LOW_2500BASE_X: 814 return IFM_2500_X; 815 case ICE_PHY_TYPE_LOW_2500BASE_KX: 816 return IFM_2500_KX; 817 case ICE_PHY_TYPE_LOW_5GBASE_T: 818 return IFM_5000_T; 819 case ICE_PHY_TYPE_LOW_5GBASE_KR: 820 return IFM_5000_KR; 821 case ICE_PHY_TYPE_LOW_10GBASE_T: 822 return IFM_10G_T; 823 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 824 return IFM_10G_TWINAX; 825 case ICE_PHY_TYPE_LOW_10GBASE_SR: 826 return IFM_10G_SR; 827 case ICE_PHY_TYPE_LOW_10GBASE_LR: 828 return IFM_10G_LR; 829 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 830 return IFM_10G_KR; 831 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 832 return IFM_10G_AOC; 833 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 834 return IFM_10G_SFI; 835 case ICE_PHY_TYPE_LOW_25GBASE_T: 836 return IFM_25G_T; 837 case ICE_PHY_TYPE_LOW_25GBASE_CR: 838 return IFM_25G_CR; 839 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 840 return IFM_25G_CR_S; 841 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 842 return IFM_25G_CR1; 843 case ICE_PHY_TYPE_LOW_25GBASE_SR: 844 return IFM_25G_SR; 845 case ICE_PHY_TYPE_LOW_25GBASE_LR: 846 return IFM_25G_LR; 847 case ICE_PHY_TYPE_LOW_25GBASE_KR: 848 return IFM_25G_KR; 849 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 850 return IFM_25G_KR_S; 851 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 852 return IFM_25G_KR1; 853 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 854 return IFM_25G_AOC; 855 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 856 return IFM_25G_AUI; 857 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 858 return IFM_40G_CR4; 859 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 860 return IFM_40G_SR4; 861 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 862 return IFM_40G_LR4; 863 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 864 return IFM_40G_KR4; 865 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 
866 return IFM_40G_XLAUI_AC; 867 case ICE_PHY_TYPE_LOW_40G_XLAUI: 868 return IFM_40G_XLAUI; 869 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 870 return IFM_50G_CR2; 871 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 872 return IFM_50G_SR2; 873 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 874 return IFM_50G_LR2; 875 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 876 return IFM_50G_KR2; 877 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 878 return IFM_50G_LAUI2_AC; 879 case ICE_PHY_TYPE_LOW_50G_LAUI2: 880 return IFM_50G_LAUI2; 881 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 882 return IFM_50G_AUI2_AC; 883 case ICE_PHY_TYPE_LOW_50G_AUI2: 884 return IFM_50G_AUI2; 885 case ICE_PHY_TYPE_LOW_50GBASE_CP: 886 return IFM_50G_CP; 887 case ICE_PHY_TYPE_LOW_50GBASE_SR: 888 return IFM_50G_SR; 889 case ICE_PHY_TYPE_LOW_50GBASE_FR: 890 return IFM_50G_FR; 891 case ICE_PHY_TYPE_LOW_50GBASE_LR: 892 return IFM_50G_LR; 893 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 894 return IFM_50G_KR_PAM4; 895 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 896 return IFM_50G_AUI1_AC; 897 case ICE_PHY_TYPE_LOW_50G_AUI1: 898 return IFM_50G_AUI1; 899 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 900 return IFM_100G_CR4; 901 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 902 return IFM_100G_SR4; 903 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 904 return IFM_100G_LR4; 905 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 906 return IFM_100G_KR4; 907 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 908 return IFM_100G_CAUI4_AC; 909 case ICE_PHY_TYPE_LOW_100G_CAUI4: 910 return IFM_100G_CAUI4; 911 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 912 return IFM_100G_AUI4_AC; 913 case ICE_PHY_TYPE_LOW_100G_AUI4: 914 return IFM_100G_AUI4; 915 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 916 return IFM_100G_CR_PAM4; 917 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 918 return IFM_100G_KR_PAM4; 919 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 920 return IFM_100G_CP2; 921 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 922 return IFM_100G_SR2; 923 case ICE_PHY_TYPE_LOW_100GBASE_DR: 924 return IFM_100G_DR; 925 default: 926 return IFM_UNKNOWN; 927 } 928 } 929 930 /** 931 * ice_get_phy_type_high - Get media associated with phy_type_high 932 * @phy_type_high: the upper 64bits of phy_type from the AdminQ 933 * 934 * Given the upper 64bits of the phy_type from the hardware, return the 935 * ifm_active bit associated. Return IFM_UNKNOWN on an unknown value. Note 936 * that only one of ice_get_phy_type_low or ice_get_phy_type_high should be 937 * called. If phy_type_high is zero, call ice_get_phy_type_low. 938 */ 939 int 940 ice_get_phy_type_high(uint64_t phy_type_high) 941 { 942 switch (phy_type_high) { 943 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 944 return IFM_100G_KR2_PAM4; 945 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 946 return IFM_100G_CAUI2_AC; 947 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 948 return IFM_100G_CAUI2; 949 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 950 return IFM_100G_AUI2_AC; 951 case ICE_PHY_TYPE_HIGH_100G_AUI2: 952 return IFM_100G_AUI2; 953 default: 954 return IFM_UNKNOWN; 955 } 956 } 957 958 /** 959 * ice_phy_types_to_max_rate - Returns port's max supported baudrate 960 * @pi: port info struct 961 * 962 * ice_aq_get_phy_caps() w/ ICE_AQC_REPORT_TOPO_CAP parameter needs to have 963 * been called before this function for it to work. 
964 */ 965 static uint64_t 966 ice_phy_types_to_max_rate(struct ice_port_info *pi) 967 { 968 uint64_t phy_low = pi->phy.phy_type_low; 969 uint64_t phy_high = pi->phy.phy_type_high; 970 uint64_t max_rate = 0; 971 int bit; 972 973 /* 974 * These are based on the indices used in the BIT() macros for 975 * ICE_PHY_TYPE_LOW_* 976 */ 977 static const uint64_t phy_rates[] = { 978 IF_Mbps(100), 979 IF_Mbps(100), 980 IF_Gbps(1ULL), 981 IF_Gbps(1ULL), 982 IF_Gbps(1ULL), 983 IF_Gbps(1ULL), 984 IF_Gbps(1ULL), 985 IF_Mbps(2500ULL), 986 IF_Mbps(2500ULL), 987 IF_Mbps(2500ULL), 988 IF_Gbps(5ULL), 989 IF_Gbps(5ULL), 990 IF_Gbps(10ULL), 991 IF_Gbps(10ULL), 992 IF_Gbps(10ULL), 993 IF_Gbps(10ULL), 994 IF_Gbps(10ULL), 995 IF_Gbps(10ULL), 996 IF_Gbps(10ULL), 997 IF_Gbps(25ULL), 998 IF_Gbps(25ULL), 999 IF_Gbps(25ULL), 1000 IF_Gbps(25ULL), 1001 IF_Gbps(25ULL), 1002 IF_Gbps(25ULL), 1003 IF_Gbps(25ULL), 1004 IF_Gbps(25ULL), 1005 IF_Gbps(25ULL), 1006 IF_Gbps(25ULL), 1007 IF_Gbps(25ULL), 1008 IF_Gbps(40ULL), 1009 IF_Gbps(40ULL), 1010 IF_Gbps(40ULL), 1011 IF_Gbps(40ULL), 1012 IF_Gbps(40ULL), 1013 IF_Gbps(40ULL), 1014 IF_Gbps(50ULL), 1015 IF_Gbps(50ULL), 1016 IF_Gbps(50ULL), 1017 IF_Gbps(50ULL), 1018 IF_Gbps(50ULL), 1019 IF_Gbps(50ULL), 1020 IF_Gbps(50ULL), 1021 IF_Gbps(50ULL), 1022 IF_Gbps(50ULL), 1023 IF_Gbps(50ULL), 1024 IF_Gbps(50ULL), 1025 IF_Gbps(50ULL), 1026 IF_Gbps(50ULL), 1027 IF_Gbps(50ULL), 1028 IF_Gbps(50ULL), 1029 IF_Gbps(100ULL), 1030 IF_Gbps(100ULL), 1031 IF_Gbps(100ULL), 1032 IF_Gbps(100ULL), 1033 IF_Gbps(100ULL), 1034 IF_Gbps(100ULL), 1035 IF_Gbps(100ULL), 1036 IF_Gbps(100ULL), 1037 IF_Gbps(100ULL), 1038 IF_Gbps(100ULL), 1039 IF_Gbps(100ULL), 1040 IF_Gbps(100ULL), 1041 IF_Gbps(100ULL), 1042 /* These rates are for ICE_PHY_TYPE_HIGH_* */ 1043 IF_Gbps(100ULL), 1044 IF_Gbps(100ULL), 1045 IF_Gbps(100ULL), 1046 IF_Gbps(100ULL), 1047 IF_Gbps(100ULL) 1048 }; 1049 1050 /* coverity[address_of] */ 1051 for_each_set_bit(bit, &phy_high, 64) 1052 if ((bit + 64) < (int)ARRAY_SIZE(phy_rates)) 1053 max_rate = uqmax(max_rate, phy_rates[(bit + 64)]); 1054 1055 /* coverity[address_of] */ 1056 for_each_set_bit(bit, &phy_low, 64) 1057 max_rate = uqmax(max_rate, phy_rates[bit]); 1058 1059 return (max_rate); 1060 } 1061 1062 /* The if_media type is split over the original 5 bit media variant field, 1063 * along with extended types using up extra bits in the options section. 1064 * We want to convert this split number into a bitmap index, so we reverse the 1065 * calculation of IFM_X here. 1066 */ 1067 #define IFM_IDX(x) (((x) & IFM_TMASK) | \ 1068 (((x) & IFM_ETH_XTYPE) >> IFM_ETH_XSHIFT)) 1069 1070 /** 1071 * ice_add_media_types - Add supported media types to the media structure 1072 * @sc: ice private softc structure 1073 * @media: ifmedia structure to setup 1074 * 1075 * Looks up the supported phy types, and initializes the various media types 1076 * available. 1077 * 1078 * @pre this function must be protected from being called while another thread 1079 * is accessing the ifmedia types. 1080 */ 1081 enum ice_status 1082 ice_add_media_types(struct ice_softc *sc, struct ifmedia *media) 1083 { 1084 enum ice_status status; 1085 uint64_t phy_low, phy_high; 1086 int bit; 1087 1088 ASSERT_CFG_LOCKED(sc); 1089 1090 /* the maximum possible media type index is 511. We probably don't 1091 * need most of this space, but this ensures future compatibility when 1092 * additional media types are used. 
1093 */ 1094 ice_declare_bitmap(already_added, 511); 1095 1096 /* Remove all previous media types */ 1097 ifmedia_removeall(media); 1098 1099 status = ice_get_phy_types(sc, &phy_low, &phy_high); 1100 if (status != ICE_SUCCESS) { 1101 /* Function already prints appropriate error 1102 * message 1103 */ 1104 return (status); 1105 } 1106 1107 /* make sure the added bitmap is zero'd */ 1108 memset(already_added, 0, sizeof(already_added)); 1109 1110 /* coverity[address_of] */ 1111 for_each_set_bit(bit, &phy_low, 64) { 1112 uint64_t type = BIT_ULL(bit); 1113 int ostype; 1114 1115 /* get the OS media type */ 1116 ostype = ice_get_phy_type_low(type); 1117 1118 /* don't bother adding the unknown type */ 1119 if (ostype == IFM_UNKNOWN) 1120 continue; 1121 1122 /* only add each media type to the list once */ 1123 if (ice_is_bit_set(already_added, IFM_IDX(ostype))) 1124 continue; 1125 1126 ifmedia_add(media, IFM_ETHER | ostype, 0, NULL); 1127 ice_set_bit(IFM_IDX(ostype), already_added); 1128 } 1129 1130 /* coverity[address_of] */ 1131 for_each_set_bit(bit, &phy_high, 64) { 1132 uint64_t type = BIT_ULL(bit); 1133 int ostype; 1134 1135 /* get the OS media type */ 1136 ostype = ice_get_phy_type_high(type); 1137 1138 /* don't bother adding the unknown type */ 1139 if (ostype == IFM_UNKNOWN) 1140 continue; 1141 1142 /* only add each media type to the list once */ 1143 if (ice_is_bit_set(already_added, IFM_IDX(ostype))) 1144 continue; 1145 1146 ifmedia_add(media, IFM_ETHER | ostype, 0, NULL); 1147 ice_set_bit(IFM_IDX(ostype), already_added); 1148 } 1149 1150 /* Use autoselect media by default */ 1151 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 1152 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 1153 1154 return (ICE_SUCCESS); 1155 } 1156 1157 /** 1158 * ice_configure_rxq_interrupts - Configure HW Rx queues for MSI-X interrupts 1159 * @vsi: the VSI to configure 1160 * 1161 * Called when setting up MSI-X interrupts to configure the Rx hardware queues. 1162 */ 1163 void 1164 ice_configure_rxq_interrupts(struct ice_vsi *vsi) 1165 { 1166 struct ice_hw *hw = &vsi->sc->hw; 1167 int i; 1168 1169 for (i = 0; i < vsi->num_rx_queues; i++) { 1170 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1171 u32 val; 1172 1173 val = (QINT_RQCTL_CAUSE_ENA_M | 1174 (ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) | 1175 (rxq->irqv->me << QINT_RQCTL_MSIX_INDX_S)); 1176 wr32(hw, QINT_RQCTL(vsi->rx_qmap[rxq->me]), val); 1177 } 1178 1179 ice_flush(hw); 1180 } 1181 1182 /** 1183 * ice_configure_txq_interrupts - Configure HW Tx queues for MSI-X interrupts 1184 * @vsi: the VSI to configure 1185 * 1186 * Called when setting up MSI-X interrupts to configure the Tx hardware queues. 1187 */ 1188 void 1189 ice_configure_txq_interrupts(struct ice_vsi *vsi) 1190 { 1191 struct ice_hw *hw = &vsi->sc->hw; 1192 int i; 1193 1194 for (i = 0; i < vsi->num_tx_queues; i++) { 1195 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1196 u32 val; 1197 1198 val = (QINT_TQCTL_CAUSE_ENA_M | 1199 (ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) | 1200 (txq->irqv->me << QINT_TQCTL_MSIX_INDX_S)); 1201 wr32(hw, QINT_TQCTL(vsi->tx_qmap[txq->me]), val); 1202 } 1203 1204 ice_flush(hw); 1205 } 1206 1207 /** 1208 * ice_flush_rxq_interrupts - Unconfigure Hw Rx queues MSI-X interrupt cause 1209 * @vsi: the VSI to configure 1210 * 1211 * Unset the CAUSE_ENA flag of the TQCTL register for each queue, then trigger 1212 * a software interrupt on that cause. This is required as part of the Rx 1213 * queue disable logic to dissociate the Rx queue from the interrupt. 
1214 * 1215 * Note: this function must be called prior to disabling Rx queues with 1216 * ice_control_rx_queues, otherwise the Rx queue may not be disabled properly. 1217 */ 1218 void 1219 ice_flush_rxq_interrupts(struct ice_vsi *vsi) 1220 { 1221 struct ice_hw *hw = &vsi->sc->hw; 1222 int i; 1223 1224 for (i = 0; i < vsi->num_rx_queues; i++) { 1225 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1226 u32 reg, val; 1227 1228 /* Clear the CAUSE_ENA flag */ 1229 reg = vsi->rx_qmap[rxq->me]; 1230 val = rd32(hw, QINT_RQCTL(reg)); 1231 val &= ~QINT_RQCTL_CAUSE_ENA_M; 1232 wr32(hw, QINT_RQCTL(reg), val); 1233 1234 ice_flush(hw); 1235 1236 /* Trigger a software interrupt to complete interrupt 1237 * dissociation. 1238 */ 1239 wr32(hw, GLINT_DYN_CTL(rxq->irqv->me), 1240 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); 1241 } 1242 } 1243 1244 /** 1245 * ice_flush_txq_interrupts - Unconfigure Hw Tx queues MSI-X interrupt cause 1246 * @vsi: the VSI to configure 1247 * 1248 * Unset the CAUSE_ENA flag of the TQCTL register for each queue, then trigger 1249 * a software interrupt on that cause. This is required as part of the Tx 1250 * queue disable logic to dissociate the Tx queue from the interrupt. 1251 * 1252 * Note: this function must be called prior to ice_vsi_disable_tx, otherwise 1253 * the Tx queue disable may not complete properly. 1254 */ 1255 void 1256 ice_flush_txq_interrupts(struct ice_vsi *vsi) 1257 { 1258 struct ice_hw *hw = &vsi->sc->hw; 1259 int i; 1260 1261 for (i = 0; i < vsi->num_tx_queues; i++) { 1262 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1263 u32 reg, val; 1264 1265 /* Clear the CAUSE_ENA flag */ 1266 reg = vsi->tx_qmap[txq->me]; 1267 val = rd32(hw, QINT_TQCTL(reg)); 1268 val &= ~QINT_TQCTL_CAUSE_ENA_M; 1269 wr32(hw, QINT_TQCTL(reg), val); 1270 1271 ice_flush(hw); 1272 1273 /* Trigger a software interrupt to complete interrupt 1274 * dissociation. 1275 */ 1276 wr32(hw, GLINT_DYN_CTL(txq->irqv->me), 1277 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); 1278 } 1279 } 1280 1281 /** 1282 * ice_configure_rx_itr - Configure the Rx ITR settings for this VSI 1283 * @vsi: the VSI to configure 1284 * 1285 * Program the hardware ITR registers with the settings for this VSI. 1286 */ 1287 void 1288 ice_configure_rx_itr(struct ice_vsi *vsi) 1289 { 1290 struct ice_hw *hw = &vsi->sc->hw; 1291 int i; 1292 1293 /* TODO: Handle per-queue/per-vector ITR? */ 1294 1295 for (i = 0; i < vsi->num_rx_queues; i++) { 1296 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1297 1298 wr32(hw, GLINT_ITR(ICE_RX_ITR, rxq->irqv->me), 1299 ice_itr_to_reg(hw, vsi->rx_itr)); 1300 } 1301 1302 ice_flush(hw); 1303 } 1304 1305 /** 1306 * ice_configure_tx_itr - Configure the Tx ITR settings for this VSI 1307 * @vsi: the VSI to configure 1308 * 1309 * Program the hardware ITR registers with the settings for this VSI. 1310 */ 1311 void 1312 ice_configure_tx_itr(struct ice_vsi *vsi) 1313 { 1314 struct ice_hw *hw = &vsi->sc->hw; 1315 int i; 1316 1317 /* TODO: Handle per-queue/per-vector ITR? 
*/ 1318 1319 for (i = 0; i < vsi->num_tx_queues; i++) { 1320 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1321 1322 wr32(hw, GLINT_ITR(ICE_TX_ITR, txq->irqv->me), 1323 ice_itr_to_reg(hw, vsi->tx_itr)); 1324 } 1325 1326 ice_flush(hw); 1327 } 1328 1329 /** 1330 * ice_setup_tx_ctx - Setup an ice_tlan_ctx structure for a queue 1331 * @txq: the Tx queue to configure 1332 * @tlan_ctx: the Tx LAN queue context structure to initialize 1333 * @pf_q: real queue number 1334 */ 1335 static int 1336 ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) 1337 { 1338 struct ice_vsi *vsi = txq->vsi; 1339 struct ice_softc *sc = vsi->sc; 1340 struct ice_hw *hw = &sc->hw; 1341 1342 tlan_ctx->port_num = hw->port_info->lport; 1343 1344 /* number of descriptors in the queue */ 1345 tlan_ctx->qlen = txq->desc_count; 1346 1347 /* set the transmit queue base address, defined in 128 byte units */ 1348 tlan_ctx->base = txq->tx_paddr >> 7; 1349 1350 tlan_ctx->pf_num = hw->pf_id; 1351 1352 /* For now, we only have code supporting PF VSIs */ 1353 switch (vsi->type) { 1354 case ICE_VSI_PF: 1355 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 1356 break; 1357 default: 1358 return (ENODEV); 1359 } 1360 1361 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); 1362 1363 /* Enable TSO */ 1364 tlan_ctx->tso_ena = 1; 1365 tlan_ctx->internal_usage_flag = 1; 1366 1367 tlan_ctx->tso_qnum = pf_q; 1368 1369 /* 1370 * Stick with the older legacy Tx queue interface, instead of the new 1371 * advanced queue interface. 1372 */ 1373 tlan_ctx->legacy_int = 1; 1374 1375 /* Descriptor WB mode */ 1376 tlan_ctx->wb_mode = 0; 1377 1378 return (0); 1379 } 1380 1381 /** 1382 * ice_cfg_vsi_for_tx - Configure the hardware for Tx 1383 * @vsi: the VSI to configure 1384 * 1385 * Configure the device Tx queues through firmware AdminQ commands. After 1386 * this, Tx queues will be ready for transmit. 1387 */ 1388 int 1389 ice_cfg_vsi_for_tx(struct ice_vsi *vsi) 1390 { 1391 struct ice_aqc_add_tx_qgrp *qg; 1392 struct ice_hw *hw = &vsi->sc->hw; 1393 device_t dev = vsi->sc->dev; 1394 enum ice_status status; 1395 int i; 1396 int err = 0; 1397 u16 qg_size, pf_q; 1398 1399 qg_size = ice_struct_size(qg, txqs, 1); 1400 qg = (struct ice_aqc_add_tx_qgrp *)malloc(qg_size, M_ICE, M_NOWAIT|M_ZERO); 1401 if (!qg) 1402 return (ENOMEM); 1403 1404 qg->num_txqs = 1; 1405 1406 for (i = 0; i < vsi->num_tx_queues; i++) { 1407 struct ice_tlan_ctx tlan_ctx = { 0 }; 1408 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1409 1410 pf_q = vsi->tx_qmap[txq->me]; 1411 qg->txqs[0].txq_id = htole16(pf_q); 1412 1413 err = ice_setup_tx_ctx(txq, &tlan_ctx, pf_q); 1414 if (err) 1415 goto free_txqg; 1416 1417 ice_set_ctx(hw, (u8 *)&tlan_ctx, qg->txqs[0].txq_ctx, 1418 ice_tlan_ctx_info); 1419 1420 status = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1421 i, 1, qg, qg_size, NULL); 1422 if (status) { 1423 device_printf(dev, 1424 "Failed to set LAN Tx queue context, err %s aq_err %s\n", 1425 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 1426 err = ENODEV; 1427 goto free_txqg; 1428 } 1429 1430 /* Keep track of the Tx queue TEID */ 1431 if (pf_q == le16toh(qg->txqs[0].txq_id)) 1432 txq->q_teid = le32toh(qg->txqs[0].q_teid); 1433 } 1434 1435 free_txqg: 1436 free(qg, M_ICE); 1437 1438 return (err); 1439 } 1440 1441 /** 1442 * ice_setup_rx_ctx - Setup an Rx context structure for a receive queue 1443 * @rxq: the receive queue to program 1444 * 1445 * Setup an Rx queue context structure and program it into the hardware 1446 * registers. 
This is a necessary step for enabling the Rx queue. 1447 * 1448 * @pre the VSI associated with this queue must have initialized mbuf_sz 1449 */ 1450 static int 1451 ice_setup_rx_ctx(struct ice_rx_queue *rxq) 1452 { 1453 struct ice_rlan_ctx rlan_ctx = {0}; 1454 struct ice_vsi *vsi = rxq->vsi; 1455 struct ice_softc *sc = vsi->sc; 1456 struct ice_hw *hw = &sc->hw; 1457 enum ice_status status; 1458 u32 rxdid = ICE_RXDID_FLEX_NIC; 1459 u32 regval; 1460 u16 pf_q; 1461 1462 pf_q = vsi->rx_qmap[rxq->me]; 1463 1464 /* set the receive queue base address, defined in 128 byte units */ 1465 rlan_ctx.base = rxq->rx_paddr >> 7; 1466 1467 rlan_ctx.qlen = rxq->desc_count; 1468 1469 rlan_ctx.dbuf = vsi->mbuf_sz >> ICE_RLAN_CTX_DBUF_S; 1470 1471 /* use 32 byte descriptors */ 1472 rlan_ctx.dsize = 1; 1473 1474 /* Strip the Ethernet CRC bytes before the packet is posted to the 1475 * host memory. 1476 */ 1477 rlan_ctx.crcstrip = 1; 1478 1479 rlan_ctx.l2tsel = 1; 1480 1481 /* don't do header splitting */ 1482 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; 1483 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; 1484 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; 1485 1486 /* strip VLAN from inner headers */ 1487 rlan_ctx.showiv = 1; 1488 1489 rlan_ctx.rxmax = min(vsi->max_frame_size, 1490 ICE_MAX_RX_SEGS * vsi->mbuf_sz); 1491 1492 rlan_ctx.lrxqthresh = 1; 1493 1494 if (vsi->type != ICE_VSI_VF) { 1495 regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); 1496 regval &= ~QRXFLXP_CNTXT_RXDID_IDX_M; 1497 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 1498 QRXFLXP_CNTXT_RXDID_IDX_M; 1499 1500 regval &= ~QRXFLXP_CNTXT_RXDID_PRIO_M; 1501 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 1502 QRXFLXP_CNTXT_RXDID_PRIO_M; 1503 1504 wr32(hw, QRXFLXP_CNTXT(pf_q), regval); 1505 } 1506 1507 status = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); 1508 if (status) { 1509 device_printf(sc->dev, 1510 "Failed to set LAN Rx queue context, err %s aq_err %s\n", 1511 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 1512 return (EIO); 1513 } 1514 1515 wr32(hw, rxq->tail, 0); 1516 1517 return 0; 1518 } 1519 1520 /** 1521 * ice_cfg_vsi_for_rx - Configure the hardware for Rx 1522 * @vsi: the VSI to configure 1523 * 1524 * Prepare an Rx context descriptor and configure the device to receive 1525 * traffic. 1526 * 1527 * @pre the VSI must have initialized mbuf_sz 1528 */ 1529 int 1530 ice_cfg_vsi_for_rx(struct ice_vsi *vsi) 1531 { 1532 int i, err; 1533 1534 for (i = 0; i < vsi->num_rx_queues; i++) { 1535 MPASS(vsi->mbuf_sz > 0); 1536 err = ice_setup_rx_ctx(&vsi->rx_queues[i]); 1537 if (err) 1538 return err; 1539 } 1540 1541 return (0); 1542 } 1543 1544 /** 1545 * ice_is_rxq_ready - Check if an Rx queue is ready 1546 * @hw: ice hw structure 1547 * @pf_q: absolute PF queue index to check 1548 * @reg: on successful return, contains qrx_ctrl contents 1549 * 1550 * Reads the QRX_CTRL register and verifies if the queue is in a consistent 1551 * state. That is, QENA_REQ matches QENA_STAT. Used to check before making 1552 * a request to change the queue, as well as to verify the request has 1553 * finished. The queue should change status within a few microseconds, so we 1554 * use a small delay while polling the register. 1555 * 1556 * Returns an error code if the queue does not update after a few retries. 
1557 */ 1558 static int 1559 ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg) 1560 { 1561 u32 qrx_ctrl, qena_req, qena_stat; 1562 int i; 1563 1564 for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { 1565 qrx_ctrl = rd32(hw, QRX_CTRL(pf_q)); 1566 qena_req = (qrx_ctrl >> QRX_CTRL_QENA_REQ_S) & 1; 1567 qena_stat = (qrx_ctrl >> QRX_CTRL_QENA_STAT_S) & 1; 1568 1569 /* if the request and status bits equal, then the queue is 1570 * fully disabled or enabled. 1571 */ 1572 if (qena_req == qena_stat) { 1573 *reg = qrx_ctrl; 1574 return (0); 1575 } 1576 1577 /* wait a few microseconds before we check again */ 1578 DELAY(10); 1579 } 1580 1581 return (ETIMEDOUT); 1582 } 1583 1584 /** 1585 * ice_control_rx_queues - Configure hardware to start or stop the Rx queues 1586 * @vsi: VSI to enable/disable queues 1587 * @enable: true to enable queues, false to disable 1588 * 1589 * Control the Rx queues through the QRX_CTRL register, enabling or disabling 1590 * them. Wait for the appropriate time to ensure that the queues have actually 1591 * reached the expected state. 1592 */ 1593 int 1594 ice_control_rx_queues(struct ice_vsi *vsi, bool enable) 1595 { 1596 struct ice_hw *hw = &vsi->sc->hw; 1597 device_t dev = vsi->sc->dev; 1598 u32 qrx_ctrl = 0; 1599 int i, err; 1600 1601 /* TODO: amortize waits by changing all queues up front and then 1602 * checking their status afterwards. This will become more necessary 1603 * when we have a large number of queues. 1604 */ 1605 for (i = 0; i < vsi->num_rx_queues; i++) { 1606 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1607 int pf_q = vsi->rx_qmap[rxq->me]; 1608 1609 err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); 1610 if (err) { 1611 device_printf(dev, 1612 "Rx queue %d is not ready\n", 1613 pf_q); 1614 return err; 1615 } 1616 1617 /* Skip if the queue is already in correct state */ 1618 if (enable == !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) 1619 continue; 1620 1621 if (enable) 1622 qrx_ctrl |= QRX_CTRL_QENA_REQ_M; 1623 else 1624 qrx_ctrl &= ~QRX_CTRL_QENA_REQ_M; 1625 wr32(hw, QRX_CTRL(pf_q), qrx_ctrl); 1626 1627 /* wait for the queue to finalize the request */ 1628 err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); 1629 if (err) { 1630 device_printf(dev, 1631 "Rx queue %d %sable timeout\n", 1632 pf_q, (enable ? "en" : "dis")); 1633 return err; 1634 } 1635 1636 /* this should never happen */ 1637 if (enable != !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) { 1638 device_printf(dev, 1639 "Rx queue %d invalid state\n", 1640 pf_q); 1641 return (EDOOFUS); 1642 } 1643 } 1644 1645 return (0); 1646 } 1647 1648 /** 1649 * ice_add_mac_to_list - Add MAC filter to a MAC filter list 1650 * @vsi: the VSI to forward to 1651 * @list: list which contains MAC filter entries 1652 * @addr: the MAC address to be added 1653 * @action: filter action to perform on match 1654 * 1655 * Adds a MAC address filter to the list which will be forwarded to firmware 1656 * to add a series of MAC address filters. 1657 * 1658 * Returns 0 on success, and an error code on failure. 
1659 * 1660 */ 1661 static int 1662 ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 1663 const u8 *addr, enum ice_sw_fwd_act_type action) 1664 { 1665 struct ice_fltr_list_entry *entry; 1666 1667 entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); 1668 if (!entry) 1669 return (ENOMEM); 1670 1671 entry->fltr_info.flag = ICE_FLTR_TX; 1672 entry->fltr_info.src_id = ICE_SRC_ID_VSI; 1673 entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 1674 entry->fltr_info.fltr_act = action; 1675 entry->fltr_info.vsi_handle = vsi->idx; 1676 bcopy(addr, entry->fltr_info.l_data.mac.mac_addr, ETHER_ADDR_LEN); 1677 1678 LIST_ADD(&entry->list_entry, list); 1679 1680 return 0; 1681 } 1682 1683 /** 1684 * ice_free_fltr_list - Free memory associated with a MAC address list 1685 * @list: the list to free 1686 * 1687 * Free the memory of each entry associated with the list. 1688 */ 1689 static void 1690 ice_free_fltr_list(struct ice_list_head *list) 1691 { 1692 struct ice_fltr_list_entry *e, *tmp; 1693 1694 LIST_FOR_EACH_ENTRY_SAFE(e, tmp, list, ice_fltr_list_entry, list_entry) { 1695 LIST_DEL(&e->list_entry); 1696 free(e, M_ICE); 1697 } 1698 } 1699 1700 /** 1701 * ice_add_vsi_mac_filter - Add a MAC address filter for a VSI 1702 * @vsi: the VSI to add the filter for 1703 * @addr: MAC address to add a filter for 1704 * 1705 * Add a MAC address filter for a given VSI. This is a wrapper around 1706 * ice_add_mac to simplify the interface. First, it only accepts a single 1707 * address, so we don't have to mess around with the list setup in other 1708 * functions. Second, it ignores the ICE_ERR_ALREADY_EXIST error, so that 1709 * callers don't need to worry about attempting to add the same filter twice. 1710 */ 1711 int 1712 ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) 1713 { 1714 struct ice_list_head mac_addr_list; 1715 struct ice_hw *hw = &vsi->sc->hw; 1716 device_t dev = vsi->sc->dev; 1717 enum ice_status status; 1718 int err = 0; 1719 1720 INIT_LIST_HEAD(&mac_addr_list); 1721 1722 err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); 1723 if (err) 1724 goto free_mac_list; 1725 1726 status = ice_add_mac(hw, &mac_addr_list); 1727 if (status == ICE_ERR_ALREADY_EXISTS) { 1728 ; /* Don't complain if we try to add a filter that already exists */ 1729 } else if (status) { 1730 device_printf(dev, 1731 "Failed to add a filter for MAC %6D, err %s aq_err %s\n", 1732 addr, ":", 1733 ice_status_str(status), 1734 ice_aq_str(hw->adminq.sq_last_status)); 1735 err = (EIO); 1736 } 1737 1738 free_mac_list: 1739 ice_free_fltr_list(&mac_addr_list); 1740 return err; 1741 } 1742 1743 /** 1744 * ice_cfg_pf_default_mac_filters - Setup default unicast and broadcast addrs 1745 * @sc: device softc structure 1746 * 1747 * Program the default unicast and broadcast filters for the PF VSI. 1748 */ 1749 int 1750 ice_cfg_pf_default_mac_filters(struct ice_softc *sc) 1751 { 1752 struct ice_vsi *vsi = &sc->pf_vsi; 1753 struct ice_hw *hw = &sc->hw; 1754 int err; 1755 1756 /* Add the LAN MAC address */ 1757 err = ice_add_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); 1758 if (err) 1759 return err; 1760 1761 /* Add the broadcast address */ 1762 err = ice_add_vsi_mac_filter(vsi, broadcastaddr); 1763 if (err) 1764 return err; 1765 1766 return (0); 1767 } 1768 1769 /** 1770 * ice_remove_vsi_mac_filter - Remove a MAC address filter for a VSI 1771 * @vsi: the VSI to add the filter for 1772 * @addr: MAC address to remove a filter for 1773 * 1774 * Remove a MAC address filter from a given VSI. 
This is a wrapper around 1775 * ice_remove_mac to simplify the interface. First, it only accepts a single 1776 * address, so we don't have to mess around with the list setup in other 1777 * functions. Second, it ignores the ICE_ERR_DOES_NOT_EXIST error, so that 1778 * callers don't need to worry about attempting to remove filters which 1779 * haven't yet been added. 1780 */ 1781 int 1782 ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) 1783 { 1784 struct ice_list_head mac_addr_list; 1785 struct ice_hw *hw = &vsi->sc->hw; 1786 device_t dev = vsi->sc->dev; 1787 enum ice_status status; 1788 int err = 0; 1789 1790 INIT_LIST_HEAD(&mac_addr_list); 1791 1792 err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); 1793 if (err) 1794 goto free_mac_list; 1795 1796 status = ice_remove_mac(hw, &mac_addr_list); 1797 if (status == ICE_ERR_DOES_NOT_EXIST) { 1798 ; /* Don't complain if we try to remove a filter that doesn't exist */ 1799 } else if (status) { 1800 device_printf(dev, 1801 "Failed to remove a filter for MAC %6D, err %s aq_err %s\n", 1802 addr, ":", 1803 ice_status_str(status), 1804 ice_aq_str(hw->adminq.sq_last_status)); 1805 err = (EIO); 1806 } 1807 1808 free_mac_list: 1809 ice_free_fltr_list(&mac_addr_list); 1810 return err; 1811 } 1812 1813 /** 1814 * ice_rm_pf_default_mac_filters - Remove default unicast and broadcast addrs 1815 * @sc: device softc structure 1816 * 1817 * Remove the default unicast and broadcast filters from the PF VSI. 1818 */ 1819 int 1820 ice_rm_pf_default_mac_filters(struct ice_softc *sc) 1821 { 1822 struct ice_vsi *vsi = &sc->pf_vsi; 1823 struct ice_hw *hw = &sc->hw; 1824 int err; 1825 1826 /* Remove the LAN MAC address */ 1827 err = ice_remove_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); 1828 if (err) 1829 return err; 1830 1831 /* Remove the broadcast address */ 1832 err = ice_remove_vsi_mac_filter(vsi, broadcastaddr); 1833 if (err) 1834 return (EIO); 1835 1836 return (0); 1837 } 1838 1839 /** 1840 * ice_check_ctrlq_errors - Check for and report controlq errors 1841 * @sc: device private structure 1842 * @qname: name of the controlq 1843 * @cq: the controlq to check 1844 * 1845 * Check and report controlq errors. Currently all we do is report them to the 1846 * kernel message log, but we might want to improve this in the future, such 1847 * as to keep track of statistics. 1848 */ 1849 static void 1850 ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, 1851 struct ice_ctl_q_info *cq) 1852 { 1853 struct ice_hw *hw = &sc->hw; 1854 u32 val; 1855 1856 /* Check for error indications. Note that all the controlqs use the 1857 * same register layout, so we use the PF_FW_AxQLEN defines only. 
1858 */
1859 val = rd32(hw, cq->rq.len);
1860 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1861 PF_FW_ARQLEN_ARQCRIT_M)) {
1862 if (val & PF_FW_ARQLEN_ARQVFE_M)
1863 device_printf(sc->dev,
1864 "%s Receive Queue VF Error detected\n", qname);
1865 if (val & PF_FW_ARQLEN_ARQOVFL_M)
1866 device_printf(sc->dev,
1867 "%s Receive Queue Overflow Error detected\n",
1868 qname);
1869 if (val & PF_FW_ARQLEN_ARQCRIT_M)
1870 device_printf(sc->dev,
1871 "%s Receive Queue Critical Error detected\n",
1872 qname);
1873 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1874 PF_FW_ARQLEN_ARQCRIT_M);
1875 wr32(hw, cq->rq.len, val);
1876 }
1877 
1878 val = rd32(hw, cq->sq.len);
1879 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1880 PF_FW_ATQLEN_ATQCRIT_M)) {
1881 if (val & PF_FW_ATQLEN_ATQVFE_M)
1882 device_printf(sc->dev,
1883 "%s Send Queue VF Error detected\n", qname);
1884 if (val & PF_FW_ATQLEN_ATQOVFL_M)
1885 device_printf(sc->dev,
1886 "%s Send Queue Overflow Error detected\n",
1887 qname);
1888 if (val & PF_FW_ATQLEN_ATQCRIT_M)
1889 device_printf(sc->dev,
1890 "%s Send Queue Critical Error detected\n",
1891 qname);
1892 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1893 PF_FW_ATQLEN_ATQCRIT_M);
1894 wr32(hw, cq->sq.len, val);
1895 }
1896 }
1897 
1898 /**
1899 * ice_process_link_event - Process a link event indication from firmware
1900 * @sc: device softc structure
1901 * @e: the received event data
1902 *
1903 * Gets the current link status from hardware, and may print a message if an
1904 * unqualified module is detected.
1905 */
1906 static void
1907 ice_process_link_event(struct ice_softc *sc,
1908 struct ice_rq_event_info __invariant_only *e)
1909 {
1910 struct ice_port_info *pi = sc->hw.port_info;
1911 struct ice_hw *hw = &sc->hw;
1912 device_t dev = sc->dev;
1913 enum ice_status status;
1914 
1915 /* Sanity check that the data length matches */
1916 MPASS(le16toh(e->desc.datalen) == sizeof(struct ice_aqc_get_link_status_data));
1917 
1918 /*
1919 * Even though the adapter gets link status information inside the
1920 * event, it needs to send a Get Link Status AQ command in order
1921 * to re-enable link events.
1922 */
1923 pi->phy.get_link_info = true;
1924 ice_get_link_status(pi, &sc->link_up);
1925 
1926 if (pi->phy.link_info.topo_media_conflict &
1927 (ICE_AQ_LINK_TOPO_CONFLICT | ICE_AQ_LINK_MEDIA_CONFLICT |
1928 ICE_AQ_LINK_TOPO_CORRUPT))
1929 device_printf(dev,
1930 "Possible mis-configuration of the Ethernet port detected; please use the Intel (R) Ethernet Port Configuration Tool utility to address the issue.\n");
1931 
1932 if ((pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) &&
1933 !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
1934 !(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE))
1935 device_printf(dev,
1936 "Link is disabled on this device because an unsupported module type was detected!
Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); 1937 1938 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { 1939 if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) { 1940 status = ice_aq_set_link_restart_an(pi, false, NULL); 1941 if (status != ICE_SUCCESS) 1942 device_printf(dev, 1943 "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", 1944 __func__, ice_status_str(status), 1945 ice_aq_str(hw->adminq.sq_last_status)); 1946 } 1947 } 1948 /* ICE_STATE_NO_MEDIA is cleared when polling task detects media */ 1949 1950 /* Indicate that link status must be reported again */ 1951 ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED); 1952 1953 /* OS link info is updated elsewhere */ 1954 } 1955 1956 /** 1957 * ice_process_ctrlq_event - Respond to a controlq event 1958 * @sc: device private structure 1959 * @qname: the name for this controlq 1960 * @event: the event to process 1961 * 1962 * Perform actions in response to various controlq event notifications. 1963 */ 1964 static void 1965 ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, 1966 struct ice_rq_event_info *event) 1967 { 1968 u16 opcode; 1969 1970 opcode = le16toh(event->desc.opcode); 1971 1972 switch (opcode) { 1973 case ice_aqc_opc_get_link_status: 1974 ice_process_link_event(sc, event); 1975 break; 1976 case ice_mbx_opc_send_msg_to_pf: 1977 /* TODO: handle IOV event */ 1978 break; 1979 case ice_aqc_opc_lldp_set_mib_change: 1980 ice_handle_mib_change_event(sc, event); 1981 break; 1982 case ice_aqc_opc_event_lan_overflow: 1983 ice_handle_lan_overflow_event(sc, event); 1984 break; 1985 default: 1986 device_printf(sc->dev, 1987 "%s Receive Queue unhandled event 0x%04x ignored\n", 1988 qname, opcode); 1989 } 1990 } 1991 1992 /** 1993 * ice_process_ctrlq - helper function to process controlq rings 1994 * @sc: device private structure 1995 * @q_type: specific control queue type 1996 * @pending: return parameter to track remaining events 1997 * 1998 * Process controlq events for a given control queue type. Returns zero on 1999 * success, and an error code on failure. If successful, pending is the number 2000 * of remaining events left in the queue. 2001 */ 2002 int 2003 ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending) 2004 { 2005 struct ice_rq_event_info event = { { 0 } }; 2006 struct ice_hw *hw = &sc->hw; 2007 struct ice_ctl_q_info *cq; 2008 enum ice_status status; 2009 const char *qname; 2010 int loop = 0; 2011 2012 switch (q_type) { 2013 case ICE_CTL_Q_ADMIN: 2014 cq = &hw->adminq; 2015 qname = "Admin"; 2016 break; 2017 case ICE_CTL_Q_MAILBOX: 2018 cq = &hw->mailboxq; 2019 qname = "Mailbox"; 2020 break; 2021 default: 2022 device_printf(sc->dev, 2023 "Unknown control queue type 0x%x\n", 2024 q_type); 2025 return 0; 2026 } 2027 2028 ice_check_ctrlq_errors(sc, qname, cq); 2029 2030 /* 2031 * Control queue processing happens during the admin task which may be 2032 * holding a non-sleepable lock, so we *must* use M_NOWAIT here. 
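 * As a consequence, the allocation below can legitimately fail under memory
 * pressure; callers are expected to treat the resulting ENOMEM as transient
 * and simply retry on a later invocation of the admin task.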
2033 */
2034 event.buf_len = cq->rq_buf_size;
2035 event.msg_buf = (u8 *)malloc(event.buf_len, M_ICE, M_ZERO | M_NOWAIT);
2036 if (!event.msg_buf) {
2037 device_printf(sc->dev,
2038 "Unable to allocate memory for %s Receive Queue event\n",
2039 qname);
2040 return (ENOMEM);
2041 }
2042 
2043 do {
2044 status = ice_clean_rq_elem(hw, cq, &event, pending);
2045 if (status == ICE_ERR_AQ_NO_WORK)
2046 break;
2047 if (status) {
2048 if (q_type == ICE_CTL_Q_ADMIN)
2049 device_printf(sc->dev,
2050 "%s Receive Queue event error %s aq_err %s\n",
2051 qname, ice_status_str(status),
2052 ice_aq_str(cq->rq_last_status));
2053 else
2054 device_printf(sc->dev,
2055 "%s Receive Queue event error %s cq_err %d\n",
2056 qname, ice_status_str(status), cq->rq_last_status);
2057 free(event.msg_buf, M_ICE);
2058 return (EIO);
2059 }
2060 /* XXX should we separate this handler by controlq type? */
2061 ice_process_ctrlq_event(sc, qname, &event);
2062 } while (*pending && (++loop < ICE_CTRLQ_WORK_LIMIT));
2063 
2064 free(event.msg_buf, M_ICE);
2065 
2066 return 0;
2067 }
2068 
2069 /**
2070 * pkg_ver_empty - Check if a package version is empty
2071 * @pkg_ver: the package version to check
2072 * @pkg_name: the package name to check
2073 *
2074 * Checks if the package version structure is empty. We consider a package
2075 * version as empty if all of its version numbers are zero and the name
2076 * string is empty as well.
2077 *
2078 * This is used to check if the package version was initialized by the driver,
2079 * as we do not expect an actual DDP package file to have a zeroed version and
2080 * name.
2081 *
2082 * @returns true if the package version is empty, or false otherwise.
2083 */
2084 static bool
2085 pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name)
2086 {
2087 return (pkg_name[0] == '\0' &&
2088 pkg_ver->major == 0 &&
2089 pkg_ver->minor == 0 &&
2090 pkg_ver->update == 0 &&
2091 pkg_ver->draft == 0);
2092 }
2093 
2094 /**
2095 * pkg_ver_compatible - Check if the package version is compatible
2096 * @pkg_ver: the package version to check
2097 *
2098 * Compares the package version number to the driver's expected major/minor
2099 * version. Returns an integer indicating whether the version is older, newer,
2100 * or compatible with the driver.
2101 *
2102 * @returns 0 if the package version is compatible, -1 if the package version
2103 * is older, and 1 if the package version is newer than the driver version.
2104 */
2105 static int
2106 pkg_ver_compatible(struct ice_pkg_ver *pkg_ver)
2107 {
2108 if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ)
2109 return (1); /* newer */
2110 else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
2111 (pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
2112 return (1); /* newer */
2113 else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
2114 (pkg_ver->minor == ICE_PKG_SUPP_VER_MNR))
2115 return (0); /* compatible */
2116 else
2117 return (-1); /* older */
2118 }
2119 
2120 /**
2121 * ice_os_pkg_version_str - Format OS package version info into a sbuf
2122 * @hw: device hw structure
2123 * @buf: string buffer to store name/version string
2124 *
2125 * Formats the name and version of the OS DDP package as found in the ice_ddp
2126 * module into a string.
2127 *
2128 * @remark This will almost always be the same as the active package, but
2129 * could be different in some cases. Use ice_active_pkg_version_str to get the
2130 * version of the active DDP package.
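 * @remark An example of the resulting string (illustrative only; the real
 * name and version come from the ice_ddp module) might be:
 * "ICE OS Default Package version 1.3.30.0".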
2131 */ 2132 static void 2133 ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) 2134 { 2135 char name_buf[ICE_PKG_NAME_SIZE]; 2136 2137 /* If the OS DDP package info is empty, use "None" */ 2138 if (pkg_ver_empty(&hw->pkg_ver, hw->pkg_name)) { 2139 sbuf_printf(buf, "None"); 2140 return; 2141 } 2142 2143 /* 2144 * This should already be null-terminated, but since this is a raw 2145 * value from an external source, strlcpy() into a new buffer to 2146 * make sure. 2147 */ 2148 bzero(name_buf, sizeof(name_buf)); 2149 strlcpy(name_buf, (char *)hw->pkg_name, ICE_PKG_NAME_SIZE); 2150 2151 sbuf_printf(buf, "%s version %u.%u.%u.%u", 2152 name_buf, 2153 hw->pkg_ver.major, 2154 hw->pkg_ver.minor, 2155 hw->pkg_ver.update, 2156 hw->pkg_ver.draft); 2157 } 2158 2159 /** 2160 * ice_active_pkg_version_str - Format active package version info into a sbuf 2161 * @hw: device hw structure 2162 * @buf: string buffer to store name/version string 2163 * 2164 * Formats the name and version of the active DDP package info into a string 2165 * buffer for use. 2166 */ 2167 static void 2168 ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) 2169 { 2170 char name_buf[ICE_PKG_NAME_SIZE]; 2171 2172 /* If the active DDP package info is empty, use "None" */ 2173 if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { 2174 sbuf_printf(buf, "None"); 2175 return; 2176 } 2177 2178 /* 2179 * This should already be null-terminated, but since this is a raw 2180 * value from an external source, strlcpy() into a new buffer to 2181 * make sure. 2182 */ 2183 bzero(name_buf, sizeof(name_buf)); 2184 strlcpy(name_buf, (char *)hw->active_pkg_name, ICE_PKG_NAME_SIZE); 2185 2186 sbuf_printf(buf, "%s version %u.%u.%u.%u", 2187 name_buf, 2188 hw->active_pkg_ver.major, 2189 hw->active_pkg_ver.minor, 2190 hw->active_pkg_ver.update, 2191 hw->active_pkg_ver.draft); 2192 2193 if (hw->active_track_id != 0) 2194 sbuf_printf(buf, ", track id 0x%08x", hw->active_track_id); 2195 } 2196 2197 /** 2198 * ice_nvm_version_str - Format the NVM version information into a sbuf 2199 * @hw: device hw structure 2200 * @buf: string buffer to store version string 2201 * 2202 * Formats the NVM information including firmware version, API version, NVM 2203 * version, the EETRACK id, and OEM specific version information into a string 2204 * buffer. 2205 */ 2206 static void 2207 ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf) 2208 { 2209 struct ice_nvm_info *nvm = &hw->flash.nvm; 2210 struct ice_orom_info *orom = &hw->flash.orom; 2211 struct ice_netlist_info *netlist = &hw->flash.netlist; 2212 2213 /* Note that the netlist versions are stored in packed Binary Coded 2214 * Decimal format. The use of '%x' will correctly display these as 2215 * decimal numbers. This works because every 4 bits will be displayed 2216 * as a hexadecimal digit, and the BCD format will only use the values 2217 * 0-9. 
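 * For example, a field holding the packed BCD byte 0x12 encodes the decimal
 * value 12, and printing it with "%x" yields the string "12", which is the
 * intended human-readable form.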
2218 */ 2219 sbuf_printf(buf, 2220 "fw %u.%u.%u api %u.%u nvm %x.%02x etid %08x netlist %x.%x.%x-%x.%x.%x.%04x oem %u.%u.%u", 2221 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, 2222 hw->api_maj_ver, hw->api_min_ver, 2223 nvm->major, nvm->minor, nvm->eetrack, 2224 netlist->major, netlist->minor, 2225 netlist->type >> 16, netlist->type & 0xFFFF, 2226 netlist->rev, netlist->cust_ver, netlist->hash, 2227 orom->major, orom->build, orom->patch); 2228 } 2229 2230 /** 2231 * ice_print_nvm_version - Print the NVM info to the kernel message log 2232 * @sc: the device softc structure 2233 * 2234 * Format and print an NVM version string using ice_nvm_version_str(). 2235 */ 2236 void 2237 ice_print_nvm_version(struct ice_softc *sc) 2238 { 2239 struct ice_hw *hw = &sc->hw; 2240 device_t dev = sc->dev; 2241 struct sbuf *sbuf; 2242 2243 sbuf = sbuf_new_auto(); 2244 ice_nvm_version_str(hw, sbuf); 2245 sbuf_finish(sbuf); 2246 device_printf(dev, "%s\n", sbuf_data(sbuf)); 2247 sbuf_delete(sbuf); 2248 } 2249 2250 /** 2251 * ice_update_vsi_hw_stats - Update VSI-specific ethernet statistics counters 2252 * @vsi: the VSI to be updated 2253 * 2254 * Reads hardware stats and updates the ice_vsi_hw_stats tracking structure with 2255 * the updated values. 2256 */ 2257 void 2258 ice_update_vsi_hw_stats(struct ice_vsi *vsi) 2259 { 2260 struct ice_eth_stats *prev_es, *cur_es; 2261 struct ice_hw *hw = &vsi->sc->hw; 2262 u16 vsi_num; 2263 2264 if (!ice_is_vsi_valid(hw, vsi->idx)) 2265 return; 2266 2267 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); /* HW absolute index of a VSI */ 2268 prev_es = &vsi->hw_stats.prev; 2269 cur_es = &vsi->hw_stats.cur; 2270 2271 #define ICE_VSI_STAT40(name, location) \ 2272 ice_stat_update40(hw, name ## L(vsi_num), \ 2273 vsi->hw_stats.offsets_loaded, \ 2274 &prev_es->location, &cur_es->location) 2275 2276 #define ICE_VSI_STAT32(name, location) \ 2277 ice_stat_update32(hw, name(vsi_num), \ 2278 vsi->hw_stats.offsets_loaded, \ 2279 &prev_es->location, &cur_es->location) 2280 2281 ICE_VSI_STAT40(GLV_GORC, rx_bytes); 2282 ICE_VSI_STAT40(GLV_UPRC, rx_unicast); 2283 ICE_VSI_STAT40(GLV_MPRC, rx_multicast); 2284 ICE_VSI_STAT40(GLV_BPRC, rx_broadcast); 2285 ICE_VSI_STAT32(GLV_RDPC, rx_discards); 2286 ICE_VSI_STAT40(GLV_GOTC, tx_bytes); 2287 ICE_VSI_STAT40(GLV_UPTC, tx_unicast); 2288 ICE_VSI_STAT40(GLV_MPTC, tx_multicast); 2289 ICE_VSI_STAT40(GLV_BPTC, tx_broadcast); 2290 ICE_VSI_STAT32(GLV_TEPC, tx_errors); 2291 2292 ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded, 2293 cur_es); 2294 2295 #undef ICE_VSI_STAT40 2296 #undef ICE_VSI_STAT32 2297 2298 vsi->hw_stats.offsets_loaded = true; 2299 } 2300 2301 /** 2302 * ice_reset_vsi_stats - Reset VSI statistics counters 2303 * @vsi: VSI structure 2304 * 2305 * Resets the software tracking counters for the VSI statistics, and indicate 2306 * that the offsets haven't been loaded. This is intended to be called 2307 * post-reset so that VSI statistics count from zero again. 2308 */ 2309 void 2310 ice_reset_vsi_stats(struct ice_vsi *vsi) 2311 { 2312 /* Reset HW stats */ 2313 memset(&vsi->hw_stats.prev, 0, sizeof(vsi->hw_stats.prev)); 2314 memset(&vsi->hw_stats.cur, 0, sizeof(vsi->hw_stats.cur)); 2315 vsi->hw_stats.offsets_loaded = false; 2316 } 2317 2318 /** 2319 * ice_update_pf_stats - Update port stats counters 2320 * @sc: device private softc structure 2321 * 2322 * Reads hardware statistics registers and updates the software tracking 2323 * structure with new values. 
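 * @remark The 40-bit and 32-bit stat helpers used below are presumed to
 * handle counter wrap by comparing each new reading against the previous
 * snapshot; the first call after a reset therefore only primes the offsets
 * (see offsets_loaded) rather than accumulating deltas.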
2324 */ 2325 void 2326 ice_update_pf_stats(struct ice_softc *sc) 2327 { 2328 struct ice_hw_port_stats *prev_ps, *cur_ps; 2329 struct ice_hw *hw = &sc->hw; 2330 u8 lport; 2331 2332 MPASS(hw->port_info); 2333 2334 prev_ps = &sc->stats.prev; 2335 cur_ps = &sc->stats.cur; 2336 lport = hw->port_info->lport; 2337 2338 #define ICE_PF_STAT40(name, location) \ 2339 ice_stat_update40(hw, name ## L(lport), \ 2340 sc->stats.offsets_loaded, \ 2341 &prev_ps->location, &cur_ps->location) 2342 2343 #define ICE_PF_STAT32(name, location) \ 2344 ice_stat_update32(hw, name(lport), \ 2345 sc->stats.offsets_loaded, \ 2346 &prev_ps->location, &cur_ps->location) 2347 2348 ICE_PF_STAT40(GLPRT_GORC, eth.rx_bytes); 2349 ICE_PF_STAT40(GLPRT_UPRC, eth.rx_unicast); 2350 ICE_PF_STAT40(GLPRT_MPRC, eth.rx_multicast); 2351 ICE_PF_STAT40(GLPRT_BPRC, eth.rx_broadcast); 2352 ICE_PF_STAT40(GLPRT_GOTC, eth.tx_bytes); 2353 ICE_PF_STAT40(GLPRT_UPTC, eth.tx_unicast); 2354 ICE_PF_STAT40(GLPRT_MPTC, eth.tx_multicast); 2355 ICE_PF_STAT40(GLPRT_BPTC, eth.tx_broadcast); 2356 /* This stat register doesn't have an lport */ 2357 ice_stat_update32(hw, PRTRPB_RDPC, 2358 sc->stats.offsets_loaded, 2359 &prev_ps->eth.rx_discards, &cur_ps->eth.rx_discards); 2360 2361 ICE_PF_STAT32(GLPRT_TDOLD, tx_dropped_link_down); 2362 ICE_PF_STAT40(GLPRT_PRC64, rx_size_64); 2363 ICE_PF_STAT40(GLPRT_PRC127, rx_size_127); 2364 ICE_PF_STAT40(GLPRT_PRC255, rx_size_255); 2365 ICE_PF_STAT40(GLPRT_PRC511, rx_size_511); 2366 ICE_PF_STAT40(GLPRT_PRC1023, rx_size_1023); 2367 ICE_PF_STAT40(GLPRT_PRC1522, rx_size_1522); 2368 ICE_PF_STAT40(GLPRT_PRC9522, rx_size_big); 2369 ICE_PF_STAT40(GLPRT_PTC64, tx_size_64); 2370 ICE_PF_STAT40(GLPRT_PTC127, tx_size_127); 2371 ICE_PF_STAT40(GLPRT_PTC255, tx_size_255); 2372 ICE_PF_STAT40(GLPRT_PTC511, tx_size_511); 2373 ICE_PF_STAT40(GLPRT_PTC1023, tx_size_1023); 2374 ICE_PF_STAT40(GLPRT_PTC1522, tx_size_1522); 2375 ICE_PF_STAT40(GLPRT_PTC9522, tx_size_big); 2376 2377 ICE_PF_STAT32(GLPRT_LXONRXC, link_xon_rx); 2378 ICE_PF_STAT32(GLPRT_LXOFFRXC, link_xoff_rx); 2379 ICE_PF_STAT32(GLPRT_LXONTXC, link_xon_tx); 2380 ICE_PF_STAT32(GLPRT_LXOFFTXC, link_xoff_tx); 2381 ICE_PF_STAT32(GLPRT_CRCERRS, crc_errors); 2382 ICE_PF_STAT32(GLPRT_ILLERRC, illegal_bytes); 2383 ICE_PF_STAT32(GLPRT_MLFC, mac_local_faults); 2384 ICE_PF_STAT32(GLPRT_MRFC, mac_remote_faults); 2385 ICE_PF_STAT32(GLPRT_RLEC, rx_len_errors); 2386 ICE_PF_STAT32(GLPRT_RUC, rx_undersize); 2387 ICE_PF_STAT32(GLPRT_RFC, rx_fragments); 2388 ICE_PF_STAT32(GLPRT_ROC, rx_oversize); 2389 ICE_PF_STAT32(GLPRT_RJC, rx_jabber); 2390 2391 #undef ICE_PF_STAT40 2392 #undef ICE_PF_STAT32 2393 2394 sc->stats.offsets_loaded = true; 2395 } 2396 2397 /** 2398 * ice_reset_pf_stats - Reset port stats counters 2399 * @sc: Device private softc structure 2400 * 2401 * Reset software tracking values for statistics to zero, and indicate that 2402 * offsets haven't been loaded. Intended to be called after a device reset so 2403 * that statistics count from zero again. 
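 * @remark After this call, the next ice_update_pf_stats() invocation is
 * expected to re-prime the counter offsets (offsets_loaded is false again)
 * instead of accumulating deltas against stale pre-reset values.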
2404 */ 2405 void 2406 ice_reset_pf_stats(struct ice_softc *sc) 2407 { 2408 memset(&sc->stats.prev, 0, sizeof(sc->stats.prev)); 2409 memset(&sc->stats.cur, 0, sizeof(sc->stats.cur)); 2410 sc->stats.offsets_loaded = false; 2411 } 2412 2413 /** 2414 * ice_sysctl_show_fw - sysctl callback to show firmware information 2415 * @oidp: sysctl oid structure 2416 * @arg1: pointer to private data structure 2417 * @arg2: unused 2418 * @req: sysctl request pointer 2419 * 2420 * Callback for the fw_version sysctl, to display the current firmware 2421 * information found at hardware init time. 2422 */ 2423 static int 2424 ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2425 { 2426 struct ice_softc *sc = (struct ice_softc *)arg1; 2427 struct ice_hw *hw = &sc->hw; 2428 struct sbuf *sbuf; 2429 2430 UNREFERENCED_PARAMETER(oidp); 2431 UNREFERENCED_PARAMETER(arg2); 2432 2433 if (ice_driver_is_detaching(sc)) 2434 return (ESHUTDOWN); 2435 2436 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2437 ice_nvm_version_str(hw, sbuf); 2438 sbuf_finish(sbuf); 2439 sbuf_delete(sbuf); 2440 2441 return (0); 2442 } 2443 2444 /** 2445 * ice_sysctl_pba_number - sysctl callback to show PBA number 2446 * @oidp: sysctl oid structure 2447 * @arg1: pointer to private data structure 2448 * @arg2: unused 2449 * @req: sysctl request pointer 2450 * 2451 * Callback for the pba_number sysctl, used to read the Product Board Assembly 2452 * number for this device. 2453 */ 2454 static int 2455 ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS) 2456 { 2457 struct ice_softc *sc = (struct ice_softc *)arg1; 2458 struct ice_hw *hw = &sc->hw; 2459 device_t dev = sc->dev; 2460 u8 pba_string[32] = ""; 2461 enum ice_status status; 2462 2463 UNREFERENCED_PARAMETER(arg2); 2464 2465 if (ice_driver_is_detaching(sc)) 2466 return (ESHUTDOWN); 2467 2468 status = ice_read_pba_string(hw, pba_string, sizeof(pba_string)); 2469 if (status) { 2470 device_printf(dev, 2471 "%s: failed to read PBA string from NVM; status %s, aq_err %s\n", 2472 __func__, ice_status_str(status), 2473 ice_aq_str(hw->adminq.sq_last_status)); 2474 return (EIO); 2475 } 2476 2477 return sysctl_handle_string(oidp, pba_string, sizeof(pba_string), req); 2478 } 2479 2480 /** 2481 * ice_sysctl_pkg_version - sysctl to show the active package version info 2482 * @oidp: sysctl oid structure 2483 * @arg1: pointer to private data structure 2484 * @arg2: unused 2485 * @req: sysctl request pointer 2486 * 2487 * Callback for the pkg_version sysctl, to display the active DDP package name 2488 * and version information. 2489 */ 2490 static int 2491 ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS) 2492 { 2493 struct ice_softc *sc = (struct ice_softc *)arg1; 2494 struct ice_hw *hw = &sc->hw; 2495 struct sbuf *sbuf; 2496 2497 UNREFERENCED_PARAMETER(oidp); 2498 UNREFERENCED_PARAMETER(arg2); 2499 2500 if (ice_driver_is_detaching(sc)) 2501 return (ESHUTDOWN); 2502 2503 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2504 ice_active_pkg_version_str(hw, sbuf); 2505 sbuf_finish(sbuf); 2506 sbuf_delete(sbuf); 2507 2508 return (0); 2509 } 2510 2511 /** 2512 * ice_sysctl_os_pkg_version - sysctl to show the OS package version info 2513 * @oidp: sysctl oid structure 2514 * @arg1: pointer to private data structure 2515 * @arg2: unused 2516 * @req: sysctl request pointer 2517 * 2518 * Callback for the pkg_version sysctl, to display the OS DDP package name and 2519 * version info found in the ice_ddp module. 
2520 */ 2521 static int 2522 ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS) 2523 { 2524 struct ice_softc *sc = (struct ice_softc *)arg1; 2525 struct ice_hw *hw = &sc->hw; 2526 struct sbuf *sbuf; 2527 2528 UNREFERENCED_PARAMETER(oidp); 2529 UNREFERENCED_PARAMETER(arg2); 2530 2531 if (ice_driver_is_detaching(sc)) 2532 return (ESHUTDOWN); 2533 2534 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2535 ice_os_pkg_version_str(hw, sbuf); 2536 sbuf_finish(sbuf); 2537 sbuf_delete(sbuf); 2538 2539 return (0); 2540 } 2541 2542 /** 2543 * ice_sysctl_current_speed - sysctl callback to show current link speed 2544 * @oidp: sysctl oid structure 2545 * @arg1: pointer to private data structure 2546 * @arg2: unused 2547 * @req: sysctl request pointer 2548 * 2549 * Callback for the current_speed sysctl, to display the string representing 2550 * the current link speed. 2551 */ 2552 static int 2553 ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2554 { 2555 struct ice_softc *sc = (struct ice_softc *)arg1; 2556 struct ice_hw *hw = &sc->hw; 2557 struct sbuf *sbuf; 2558 2559 UNREFERENCED_PARAMETER(oidp); 2560 UNREFERENCED_PARAMETER(arg2); 2561 2562 if (ice_driver_is_detaching(sc)) 2563 return (ESHUTDOWN); 2564 2565 sbuf = sbuf_new_for_sysctl(NULL, NULL, 10, req); 2566 sbuf_printf(sbuf, "%s", ice_aq_speed_to_str(hw->port_info)); 2567 sbuf_finish(sbuf); 2568 sbuf_delete(sbuf); 2569 2570 return (0); 2571 } 2572 2573 /** 2574 * @var phy_link_speeds 2575 * @brief PHY link speed conversion array 2576 * 2577 * Array of link speeds to convert ICE_PHY_TYPE_LOW and ICE_PHY_TYPE_HIGH into 2578 * link speeds used by the link speed sysctls. 2579 * 2580 * @remark these are based on the indices used in the BIT() macros for the 2581 * ICE_PHY_TYPE_LOW_* and ICE_PHY_TYPE_HIGH_* definitions. 
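 * For example, assuming ICE_PHY_TYPE_LOW_10GBASE_T is defined as BIT_ULL(12),
 * a set bit 12 in phy_type_low maps to phy_link_speeds[12], which is
 * ICE_AQ_LINK_SPEED_10GB.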
2582 */ 2583 static const uint16_t phy_link_speeds[] = { 2584 ICE_AQ_LINK_SPEED_100MB, 2585 ICE_AQ_LINK_SPEED_100MB, 2586 ICE_AQ_LINK_SPEED_1000MB, 2587 ICE_AQ_LINK_SPEED_1000MB, 2588 ICE_AQ_LINK_SPEED_1000MB, 2589 ICE_AQ_LINK_SPEED_1000MB, 2590 ICE_AQ_LINK_SPEED_1000MB, 2591 ICE_AQ_LINK_SPEED_2500MB, 2592 ICE_AQ_LINK_SPEED_2500MB, 2593 ICE_AQ_LINK_SPEED_2500MB, 2594 ICE_AQ_LINK_SPEED_5GB, 2595 ICE_AQ_LINK_SPEED_5GB, 2596 ICE_AQ_LINK_SPEED_10GB, 2597 ICE_AQ_LINK_SPEED_10GB, 2598 ICE_AQ_LINK_SPEED_10GB, 2599 ICE_AQ_LINK_SPEED_10GB, 2600 ICE_AQ_LINK_SPEED_10GB, 2601 ICE_AQ_LINK_SPEED_10GB, 2602 ICE_AQ_LINK_SPEED_10GB, 2603 ICE_AQ_LINK_SPEED_25GB, 2604 ICE_AQ_LINK_SPEED_25GB, 2605 ICE_AQ_LINK_SPEED_25GB, 2606 ICE_AQ_LINK_SPEED_25GB, 2607 ICE_AQ_LINK_SPEED_25GB, 2608 ICE_AQ_LINK_SPEED_25GB, 2609 ICE_AQ_LINK_SPEED_25GB, 2610 ICE_AQ_LINK_SPEED_25GB, 2611 ICE_AQ_LINK_SPEED_25GB, 2612 ICE_AQ_LINK_SPEED_25GB, 2613 ICE_AQ_LINK_SPEED_25GB, 2614 ICE_AQ_LINK_SPEED_40GB, 2615 ICE_AQ_LINK_SPEED_40GB, 2616 ICE_AQ_LINK_SPEED_40GB, 2617 ICE_AQ_LINK_SPEED_40GB, 2618 ICE_AQ_LINK_SPEED_40GB, 2619 ICE_AQ_LINK_SPEED_40GB, 2620 ICE_AQ_LINK_SPEED_50GB, 2621 ICE_AQ_LINK_SPEED_50GB, 2622 ICE_AQ_LINK_SPEED_50GB, 2623 ICE_AQ_LINK_SPEED_50GB, 2624 ICE_AQ_LINK_SPEED_50GB, 2625 ICE_AQ_LINK_SPEED_50GB, 2626 ICE_AQ_LINK_SPEED_50GB, 2627 ICE_AQ_LINK_SPEED_50GB, 2628 ICE_AQ_LINK_SPEED_50GB, 2629 ICE_AQ_LINK_SPEED_50GB, 2630 ICE_AQ_LINK_SPEED_50GB, 2631 ICE_AQ_LINK_SPEED_50GB, 2632 ICE_AQ_LINK_SPEED_50GB, 2633 ICE_AQ_LINK_SPEED_50GB, 2634 ICE_AQ_LINK_SPEED_50GB, 2635 ICE_AQ_LINK_SPEED_100GB, 2636 ICE_AQ_LINK_SPEED_100GB, 2637 ICE_AQ_LINK_SPEED_100GB, 2638 ICE_AQ_LINK_SPEED_100GB, 2639 ICE_AQ_LINK_SPEED_100GB, 2640 ICE_AQ_LINK_SPEED_100GB, 2641 ICE_AQ_LINK_SPEED_100GB, 2642 ICE_AQ_LINK_SPEED_100GB, 2643 ICE_AQ_LINK_SPEED_100GB, 2644 ICE_AQ_LINK_SPEED_100GB, 2645 ICE_AQ_LINK_SPEED_100GB, 2646 ICE_AQ_LINK_SPEED_100GB, 2647 ICE_AQ_LINK_SPEED_100GB, 2648 /* These rates are for ICE_PHY_TYPE_HIGH_* */ 2649 ICE_AQ_LINK_SPEED_100GB, 2650 ICE_AQ_LINK_SPEED_100GB, 2651 ICE_AQ_LINK_SPEED_100GB, 2652 ICE_AQ_LINK_SPEED_100GB, 2653 ICE_AQ_LINK_SPEED_100GB 2654 }; 2655 2656 #define ICE_SYSCTL_HELP_ADVERTISE_SPEED \ 2657 "\nControl advertised link speed." \ 2658 "\nFlags:" \ 2659 "\n\t 0x0 - Auto" \ 2660 "\n\t 0x1 - 10 Mb" \ 2661 "\n\t 0x2 - 100 Mb" \ 2662 "\n\t 0x4 - 1G" \ 2663 "\n\t 0x8 - 2.5G" \ 2664 "\n\t 0x10 - 5G" \ 2665 "\n\t 0x20 - 10G" \ 2666 "\n\t 0x40 - 20G" \ 2667 "\n\t 0x80 - 25G" \ 2668 "\n\t 0x100 - 40G" \ 2669 "\n\t 0x200 - 50G" \ 2670 "\n\t 0x400 - 100G" \ 2671 "\n\t0x8000 - Unknown" \ 2672 "\n\t" \ 2673 "\nUse \"sysctl -x\" to view flags properly." 
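/*
 * Illustrative use of the advertised speed flags above (the exact sysctl node
 * name depends on how the driver registers it, e.g. dev.ice.<unit>.advertise_speed):
 *
 *	# sysctl dev.ice.0.advertise_speed=0xa0
 *
 * would request 25G (0x80) and 10G (0x20) only, since 0x80 | 0x20 == 0xa0.
 */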
2674 2675 #define ICE_PHYS_100MB \ 2676 (ICE_PHY_TYPE_LOW_100BASE_TX | \ 2677 ICE_PHY_TYPE_LOW_100M_SGMII) 2678 #define ICE_PHYS_1000MB \ 2679 (ICE_PHY_TYPE_LOW_1000BASE_T | \ 2680 ICE_PHY_TYPE_LOW_1000BASE_SX | \ 2681 ICE_PHY_TYPE_LOW_1000BASE_LX | \ 2682 ICE_PHY_TYPE_LOW_1000BASE_KX | \ 2683 ICE_PHY_TYPE_LOW_1G_SGMII) 2684 #define ICE_PHYS_2500MB \ 2685 (ICE_PHY_TYPE_LOW_2500BASE_T | \ 2686 ICE_PHY_TYPE_LOW_2500BASE_X | \ 2687 ICE_PHY_TYPE_LOW_2500BASE_KX) 2688 #define ICE_PHYS_5GB \ 2689 (ICE_PHY_TYPE_LOW_5GBASE_T | \ 2690 ICE_PHY_TYPE_LOW_5GBASE_KR) 2691 #define ICE_PHYS_10GB \ 2692 (ICE_PHY_TYPE_LOW_10GBASE_T | \ 2693 ICE_PHY_TYPE_LOW_10G_SFI_DA | \ 2694 ICE_PHY_TYPE_LOW_10GBASE_SR | \ 2695 ICE_PHY_TYPE_LOW_10GBASE_LR | \ 2696 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \ 2697 ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \ 2698 ICE_PHY_TYPE_LOW_10G_SFI_C2C) 2699 #define ICE_PHYS_25GB \ 2700 (ICE_PHY_TYPE_LOW_25GBASE_T | \ 2701 ICE_PHY_TYPE_LOW_25GBASE_CR | \ 2702 ICE_PHY_TYPE_LOW_25GBASE_CR_S | \ 2703 ICE_PHY_TYPE_LOW_25GBASE_CR1 | \ 2704 ICE_PHY_TYPE_LOW_25GBASE_SR | \ 2705 ICE_PHY_TYPE_LOW_25GBASE_LR | \ 2706 ICE_PHY_TYPE_LOW_25GBASE_KR | \ 2707 ICE_PHY_TYPE_LOW_25GBASE_KR_S | \ 2708 ICE_PHY_TYPE_LOW_25GBASE_KR1 | \ 2709 ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | \ 2710 ICE_PHY_TYPE_LOW_25G_AUI_C2C) 2711 #define ICE_PHYS_40GB \ 2712 (ICE_PHY_TYPE_LOW_40GBASE_CR4 | \ 2713 ICE_PHY_TYPE_LOW_40GBASE_SR4 | \ 2714 ICE_PHY_TYPE_LOW_40GBASE_LR4 | \ 2715 ICE_PHY_TYPE_LOW_40GBASE_KR4 | \ 2716 ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \ 2717 ICE_PHY_TYPE_LOW_40G_XLAUI) 2718 #define ICE_PHYS_50GB \ 2719 (ICE_PHY_TYPE_LOW_50GBASE_CR2 | \ 2720 ICE_PHY_TYPE_LOW_50GBASE_SR2 | \ 2721 ICE_PHY_TYPE_LOW_50GBASE_LR2 | \ 2722 ICE_PHY_TYPE_LOW_50GBASE_KR2 | \ 2723 ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \ 2724 ICE_PHY_TYPE_LOW_50G_LAUI2 | \ 2725 ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \ 2726 ICE_PHY_TYPE_LOW_50G_AUI2 | \ 2727 ICE_PHY_TYPE_LOW_50GBASE_CP | \ 2728 ICE_PHY_TYPE_LOW_50GBASE_SR | \ 2729 ICE_PHY_TYPE_LOW_50GBASE_FR | \ 2730 ICE_PHY_TYPE_LOW_50GBASE_LR | \ 2731 ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 | \ 2732 ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \ 2733 ICE_PHY_TYPE_LOW_50G_AUI1) 2734 #define ICE_PHYS_100GB_LOW \ 2735 (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \ 2736 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \ 2737 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \ 2738 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \ 2739 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \ 2740 ICE_PHY_TYPE_LOW_100G_CAUI4 | \ 2741 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \ 2742 ICE_PHY_TYPE_LOW_100G_AUI4 | \ 2743 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \ 2744 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \ 2745 ICE_PHY_TYPE_LOW_100GBASE_CP2 | \ 2746 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \ 2747 ICE_PHY_TYPE_LOW_100GBASE_DR) 2748 #define ICE_PHYS_100GB_HIGH \ 2749 (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \ 2750 ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \ 2751 ICE_PHY_TYPE_HIGH_100G_CAUI2 | \ 2752 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ 2753 ICE_PHY_TYPE_HIGH_100G_AUI2) 2754 2755 /** 2756 * ice_aq_phy_types_to_sysctl_speeds - Convert the PHY Types to speeds 2757 * @phy_type_low: lower 64-bit PHY Type bitmask 2758 * @phy_type_high: upper 64-bit PHY Type bitmask 2759 * 2760 * Convert the PHY Type fields from Get PHY Abilities and Set PHY Config into 2761 * link speed flags. If phy_type_high has an unknown PHY type, then the return 2762 * value will include the "ICE_AQ_LINK_SPEED_UNKNOWN" flag as well. 
2763 */ 2764 static u16 2765 ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, u64 phy_type_high) 2766 { 2767 u16 sysctl_speeds = 0; 2768 int bit; 2769 2770 /* coverity[address_of] */ 2771 for_each_set_bit(bit, &phy_type_low, 64) 2772 sysctl_speeds |= phy_link_speeds[bit]; 2773 2774 /* coverity[address_of] */ 2775 for_each_set_bit(bit, &phy_type_high, 64) { 2776 if ((bit + 64) < (int)ARRAY_SIZE(phy_link_speeds)) 2777 sysctl_speeds |= phy_link_speeds[bit + 64]; 2778 else 2779 sysctl_speeds |= ICE_AQ_LINK_SPEED_UNKNOWN; 2780 } 2781 2782 return (sysctl_speeds); 2783 } 2784 2785 /** 2786 * ice_sysctl_speeds_to_aq_phy_types - Convert sysctl speed flags to AQ PHY flags 2787 * @sysctl_speeds: 16-bit sysctl speeds or AQ_LINK_SPEED flags 2788 * @phy_type_low: output parameter for lower AQ PHY flags 2789 * @phy_type_high: output parameter for higher AQ PHY flags 2790 * 2791 * Converts the given link speed flags into AQ PHY type flag sets appropriate 2792 * for use in a Set PHY Config command. 2793 */ 2794 static void 2795 ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, 2796 u64 *phy_type_high) 2797 { 2798 *phy_type_low = 0, *phy_type_high = 0; 2799 2800 if (sysctl_speeds & ICE_AQ_LINK_SPEED_100MB) 2801 *phy_type_low |= ICE_PHYS_100MB; 2802 if (sysctl_speeds & ICE_AQ_LINK_SPEED_1000MB) 2803 *phy_type_low |= ICE_PHYS_1000MB; 2804 if (sysctl_speeds & ICE_AQ_LINK_SPEED_2500MB) 2805 *phy_type_low |= ICE_PHYS_2500MB; 2806 if (sysctl_speeds & ICE_AQ_LINK_SPEED_5GB) 2807 *phy_type_low |= ICE_PHYS_5GB; 2808 if (sysctl_speeds & ICE_AQ_LINK_SPEED_10GB) 2809 *phy_type_low |= ICE_PHYS_10GB; 2810 if (sysctl_speeds & ICE_AQ_LINK_SPEED_25GB) 2811 *phy_type_low |= ICE_PHYS_25GB; 2812 if (sysctl_speeds & ICE_AQ_LINK_SPEED_40GB) 2813 *phy_type_low |= ICE_PHYS_40GB; 2814 if (sysctl_speeds & ICE_AQ_LINK_SPEED_50GB) 2815 *phy_type_low |= ICE_PHYS_50GB; 2816 if (sysctl_speeds & ICE_AQ_LINK_SPEED_100GB) { 2817 *phy_type_low |= ICE_PHYS_100GB_LOW; 2818 *phy_type_high |= ICE_PHYS_100GB_HIGH; 2819 } 2820 } 2821 2822 /** 2823 * ice_intersect_media_types_with_caps - Restrict input AQ PHY flags 2824 * @sc: driver private structure 2825 * @sysctl_speeds: current SW configuration of PHY types 2826 * @phy_type_low: input/output flag set for low PHY types 2827 * @phy_type_high: input/output flag set for high PHY types 2828 * 2829 * Intersects the input PHY flags with PHY flags retrieved from the adapter to 2830 * ensure the flags are compatible. 
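 * When Lenient Link Mode is enabled, the PHY types reported by the NVM are
 * consulted as well, so a requested speed that the currently attached media
 * cannot reach may still be accepted rather than rejected with EINVAL.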
2831 * 2832 * @returns 0 on success, EIO if an AQ command fails, or EINVAL if input PHY 2833 * types have no intersection with TOPO_CAPS and the adapter is in non-lenient 2834 * mode 2835 */ 2836 static int 2837 ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds, 2838 u64 *phy_type_low, u64 *phy_type_high) 2839 { 2840 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 2841 struct ice_port_info *pi = sc->hw.port_info; 2842 device_t dev = sc->dev; 2843 enum ice_status status; 2844 u64 temp_phy_low, temp_phy_high; 2845 u64 final_phy_low, final_phy_high; 2846 u16 topo_speeds; 2847 2848 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 2849 &pcaps, NULL); 2850 if (status != ICE_SUCCESS) { 2851 device_printf(dev, 2852 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 2853 __func__, ice_status_str(status), 2854 ice_aq_str(sc->hw.adminq.sq_last_status)); 2855 return (EIO); 2856 } 2857 2858 final_phy_low = le64toh(pcaps.phy_type_low); 2859 final_phy_high = le64toh(pcaps.phy_type_high); 2860 2861 topo_speeds = ice_aq_phy_types_to_sysctl_speeds(final_phy_low, 2862 final_phy_high); 2863 2864 /* 2865 * If the user specifies a subset of speeds the media is already 2866 * capable of supporting, then we're good to go. 2867 */ 2868 if ((sysctl_speeds & topo_speeds) == sysctl_speeds) 2869 goto intersect_final; 2870 2871 temp_phy_low = final_phy_low; 2872 temp_phy_high = final_phy_high; 2873 /* 2874 * Otherwise, we'll have to use the superset if Lenient Mode is 2875 * supported. 2876 */ 2877 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { 2878 /* 2879 * Start with masks that _don't_ include the PHY types 2880 * discovered by the TOPO_CAP. 2881 */ 2882 ice_sysctl_speeds_to_aq_phy_types(topo_speeds, &final_phy_low, 2883 &final_phy_high); 2884 final_phy_low = ~final_phy_low; 2885 final_phy_high = ~final_phy_high; 2886 2887 /* Get the PHY types the NVM says we can support */ 2888 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, 2889 &pcaps, NULL); 2890 if (status != ICE_SUCCESS) { 2891 device_printf(dev, 2892 "%s: ice_aq_get_phy_caps (NVM_CAP) failed; status %s, aq_err %s\n", 2893 __func__, ice_status_str(status), 2894 ice_aq_str(sc->hw.adminq.sq_last_status)); 2895 return (status); 2896 } 2897 2898 /* 2899 * Clear out the unsupported PHY types, including those 2900 * from TOPO_CAP. 2901 */ 2902 final_phy_low &= le64toh(pcaps.phy_type_low); 2903 final_phy_high &= le64toh(pcaps.phy_type_high); 2904 /* 2905 * Include PHY types from TOPO_CAP (which may be a subset 2906 * of the types the NVM specifies). 2907 */ 2908 final_phy_low |= temp_phy_low; 2909 final_phy_high |= temp_phy_high; 2910 } 2911 2912 intersect_final: 2913 2914 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) 2915 ice_apply_supported_speed_filter(&final_phy_low, &final_phy_high); 2916 2917 ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &temp_phy_low, 2918 &temp_phy_high); 2919 2920 final_phy_low &= temp_phy_low; 2921 final_phy_high &= temp_phy_high; 2922 2923 if (final_phy_low == 0 && final_phy_high == 0) { 2924 device_printf(dev, 2925 "The selected speed is not supported by the current media. 
Please select a link speed that is supported by the current media.\n"); 2926 return (EINVAL); 2927 } 2928 2929 /* Overwrite input phy_type values and return */ 2930 *phy_type_low = final_phy_low; 2931 *phy_type_high = final_phy_high; 2932 2933 return (0); 2934 } 2935 2936 /** 2937 * ice_get_auto_speeds - Get PHY type flags for "auto" speed 2938 * @sc: driver private structure 2939 * @phy_type_low: output low PHY type flags 2940 * @phy_type_high: output high PHY type flags 2941 * 2942 * Retrieves a suitable set of PHY type flags to use for an "auto" speed 2943 * setting by either using the NVM default overrides for speed, or retrieving 2944 * a default from the adapter using Get PHY capabilities in TOPO_CAPS mode. 2945 * 2946 * @returns 0 on success or EIO on AQ command failure 2947 */ 2948 static int 2949 ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low, 2950 u64 *phy_type_high) 2951 { 2952 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 2953 struct ice_hw *hw = &sc->hw; 2954 struct ice_port_info *pi = hw->port_info; 2955 device_t dev = sc->dev; 2956 enum ice_status status; 2957 2958 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE)) { 2959 /* copy over speed settings from LDO TLV */ 2960 *phy_type_low = CPU_TO_LE64(sc->ldo_tlv.phy_type_low); 2961 *phy_type_high = CPU_TO_LE64(sc->ldo_tlv.phy_type_high); 2962 } else { 2963 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 2964 &pcaps, NULL); 2965 if (status != ICE_SUCCESS) { 2966 device_printf(dev, 2967 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 2968 __func__, ice_status_str(status), 2969 ice_aq_str(hw->adminq.sq_last_status)); 2970 return (EIO); 2971 } 2972 2973 *phy_type_low = le64toh(pcaps.phy_type_low); 2974 *phy_type_high = le64toh(pcaps.phy_type_high); 2975 } 2976 2977 return (0); 2978 } 2979 2980 /** 2981 * ice_sysctl_advertise_speed - Display/change link speeds supported by port 2982 * @oidp: sysctl oid structure 2983 * @arg1: pointer to private data structure 2984 * @arg2: unused 2985 * @req: sysctl request pointer 2986 * 2987 * On read: Displays the currently supported speeds 2988 * On write: Sets the device's supported speeds 2989 * Valid input flags: see ICE_SYSCTL_HELP_ADVERTISE_SPEED 2990 */ 2991 static int 2992 ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS) 2993 { 2994 struct ice_softc *sc = (struct ice_softc *)arg1; 2995 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 2996 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 2997 struct ice_hw *hw = &sc->hw; 2998 struct ice_port_info *pi = hw->port_info; 2999 device_t dev = sc->dev; 3000 enum ice_status status; 3001 u64 phy_low, phy_high; 3002 u16 sysctl_speeds = 0; 3003 int error = 0; 3004 3005 UNREFERENCED_PARAMETER(arg2); 3006 3007 if (ice_driver_is_detaching(sc)) 3008 return (ESHUTDOWN); 3009 3010 /* Get the current speeds from the adapter's "active" configuration. 
*/ 3011 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 3012 &pcaps, NULL); 3013 if (status != ICE_SUCCESS) { 3014 device_printf(dev, 3015 "%s: ice_aq_get_phy_caps (SW_CFG) failed; status %s, aq_err %s\n", 3016 __func__, ice_status_str(status), 3017 ice_aq_str(hw->adminq.sq_last_status)); 3018 return (EIO); 3019 } 3020 3021 phy_low = le64toh(pcaps.phy_type_low); 3022 phy_high = le64toh(pcaps.phy_type_high); 3023 sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); 3024 3025 error = sysctl_handle_16(oidp, &sysctl_speeds, 0, req); 3026 if ((error) || (req->newptr == NULL)) 3027 return (error); 3028 3029 if (sysctl_speeds > 0x7FF) { 3030 device_printf(dev, 3031 "%s: \"%u\" is outside of the range of acceptable values.\n", 3032 __func__, sysctl_speeds); 3033 return (EINVAL); 3034 } 3035 3036 /* 0 is treated as "Auto"; the driver will handle selecting the correct speeds, 3037 * or apply an override if one is specified in the NVM. 3038 */ 3039 if (sysctl_speeds == 0) { 3040 error = ice_get_auto_speeds(sc, &phy_low, &phy_high); 3041 if (error) 3042 /* Function already prints appropriate error message */ 3043 return (error); 3044 } else { 3045 error = ice_intersect_media_types_with_caps(sc, sysctl_speeds, 3046 &phy_low, &phy_high); 3047 if (error) 3048 /* Function already prints appropriate error message */ 3049 return (error); 3050 } 3051 sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); 3052 3053 /* Cache new user setting for speeds */ 3054 pi->phy.curr_user_speed_req = sysctl_speeds; 3055 3056 /* Setup new PHY config with new input PHY types */ 3057 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 3058 3059 cfg.phy_type_low = phy_low; 3060 cfg.phy_type_high = phy_high; 3061 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 3062 3063 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3064 if (status != ICE_SUCCESS) { 3065 /* Don't indicate failure if there's no media in the port -- the sysctl 3066 * handler has saved the value and will apply it when media is inserted. 3067 */ 3068 if (status == ICE_ERR_AQ_ERROR && 3069 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 3070 device_printf(dev, 3071 "%s: Setting will be applied when media is inserted\n", __func__); 3072 return (0); 3073 } else { 3074 device_printf(dev, 3075 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 3076 __func__, ice_status_str(status), 3077 ice_aq_str(hw->adminq.sq_last_status)); 3078 return (EIO); 3079 } 3080 } 3081 3082 return (0); 3083 } 3084 3085 #define ICE_SYSCTL_HELP_FEC_CONFIG \ 3086 "\nDisplay or set the port's requested FEC mode." \ 3087 "\n\tauto - " ICE_FEC_STRING_AUTO \ 3088 "\n\tfc - " ICE_FEC_STRING_BASER \ 3089 "\n\trs - " ICE_FEC_STRING_RS \ 3090 "\n\tnone - " ICE_FEC_STRING_NONE \ 3091 "\nEither of the left or right strings above can be used to set the requested mode." 3092 3093 /** 3094 * ice_sysctl_fec_config - Display/change the configured FEC mode 3095 * @oidp: sysctl oid structure 3096 * @arg1: pointer to private data structure 3097 * @arg2: unused 3098 * @req: sysctl request pointer 3099 * 3100 * On read: Displays the configured FEC mode 3101 * On write: Sets the device's FEC mode to the input string, if it's valid. 
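 * For instance (illustrative; the exact sysctl node name depends on how the
 * driver registers it), writing "rs" requests Reed-Solomon FEC, while "auto"
 * allows the driver and firmware to choose a mode; the requested mode is
 * cached and re-applied on later link ups.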
3102 * Valid input strings: see ICE_SYSCTL_HELP_FEC_CONFIG 3103 */ 3104 static int 3105 ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS) 3106 { 3107 struct ice_softc *sc = (struct ice_softc *)arg1; 3108 struct ice_port_info *pi = sc->hw.port_info; 3109 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3110 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3111 struct ice_hw *hw = &sc->hw; 3112 enum ice_fec_mode new_mode; 3113 enum ice_status status; 3114 device_t dev = sc->dev; 3115 char req_fec[32]; 3116 int error = 0; 3117 3118 UNREFERENCED_PARAMETER(arg2); 3119 3120 if (ice_driver_is_detaching(sc)) 3121 return (ESHUTDOWN); 3122 3123 bzero(req_fec, sizeof(req_fec)); 3124 strlcpy(req_fec, ice_requested_fec_mode(pi), sizeof(req_fec)); 3125 3126 error = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req); 3127 if ((error) || (req->newptr == NULL)) 3128 return (error); 3129 3130 if (strcmp(req_fec, "auto") == 0 || 3131 strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) { 3132 new_mode = ICE_FEC_AUTO; 3133 } else if (strcmp(req_fec, "fc") == 0 || 3134 strcmp(req_fec, ice_fec_str(ICE_FEC_BASER)) == 0) { 3135 new_mode = ICE_FEC_BASER; 3136 } else if (strcmp(req_fec, "rs") == 0 || 3137 strcmp(req_fec, ice_fec_str(ICE_FEC_RS)) == 0) { 3138 new_mode = ICE_FEC_RS; 3139 } else if (strcmp(req_fec, "none") == 0 || 3140 strcmp(req_fec, ice_fec_str(ICE_FEC_NONE)) == 0) { 3141 new_mode = ICE_FEC_NONE; 3142 } else { 3143 device_printf(dev, 3144 "%s: \"%s\" is not a valid FEC mode\n", 3145 __func__, req_fec); 3146 return (EINVAL); 3147 } 3148 3149 /* Cache user FEC mode for later link ups */ 3150 pi->phy.curr_user_fec_req = new_mode; 3151 3152 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 3153 &pcaps, NULL); 3154 if (status != ICE_SUCCESS) { 3155 device_printf(dev, 3156 "%s: ice_aq_get_phy_caps failed (SW_CFG); status %s, aq_err %s\n", 3157 __func__, ice_status_str(status), 3158 ice_aq_str(hw->adminq.sq_last_status)); 3159 return (EIO); 3160 } 3161 3162 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 3163 3164 /* Get link_fec_opt/AUTO_FEC mode from TOPO caps for base for new FEC mode */ 3165 memset(&pcaps, 0, sizeof(pcaps)); 3166 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 3167 &pcaps, NULL); 3168 if (status != ICE_SUCCESS) { 3169 device_printf(dev, 3170 "%s: ice_aq_get_phy_caps failed (TOPO_CAP); status %s, aq_err %s\n", 3171 __func__, ice_status_str(status), 3172 ice_aq_str(hw->adminq.sq_last_status)); 3173 return (EIO); 3174 } 3175 3176 /* Configure new FEC options using TOPO caps */ 3177 cfg.link_fec_opt = pcaps.link_fec_options; 3178 cfg.caps &= ~ICE_AQ_PHY_ENA_AUTO_FEC; 3179 if (pcaps.caps & ICE_AQC_PHY_EN_AUTO_FEC) 3180 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_FEC; 3181 3182 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE) && 3183 new_mode == ICE_FEC_AUTO) { 3184 /* copy over FEC settings from LDO TLV */ 3185 cfg.link_fec_opt = sc->ldo_tlv.fec_options; 3186 } else { 3187 ice_cfg_phy_fec(pi, &cfg, new_mode); 3188 3189 /* Check if the new mode is valid, and exit with an error if not */ 3190 if (cfg.link_fec_opt && 3191 !(cfg.link_fec_opt & pcaps.link_fec_options)) { 3192 device_printf(dev, 3193 "%s: The requested FEC mode, %s, is not supported by current media\n", 3194 __func__, ice_fec_str(new_mode)); 3195 return (ENOTSUP); 3196 } 3197 } 3198 3199 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3200 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3201 if (status != ICE_SUCCESS) { 3202 /* Don't indicate failure if there's no media in the port -- the sysctl 3203 * handler has 
saved the value and will apply it when media is inserted. 3204 */ 3205 if (status == ICE_ERR_AQ_ERROR && 3206 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 3207 device_printf(dev, 3208 "%s: Setting will be applied when media is inserted\n", __func__); 3209 return (0); 3210 } else { 3211 device_printf(dev, 3212 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 3213 __func__, ice_status_str(status), 3214 ice_aq_str(hw->adminq.sq_last_status)); 3215 return (EIO); 3216 } 3217 } 3218 3219 return (0); 3220 } 3221 3222 /** 3223 * ice_sysctl_negotiated_fec - Display the negotiated FEC mode on the link 3224 * @oidp: sysctl oid structure 3225 * @arg1: pointer to private data structure 3226 * @arg2: unused 3227 * @req: sysctl request pointer 3228 * 3229 * On read: Displays the negotiated FEC mode, in a string 3230 */ 3231 static int 3232 ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS) 3233 { 3234 struct ice_softc *sc = (struct ice_softc *)arg1; 3235 struct ice_hw *hw = &sc->hw; 3236 char neg_fec[32]; 3237 int error; 3238 3239 UNREFERENCED_PARAMETER(arg2); 3240 3241 if (ice_driver_is_detaching(sc)) 3242 return (ESHUTDOWN); 3243 3244 /* Copy const string into a buffer to drop const qualifier */ 3245 bzero(neg_fec, sizeof(neg_fec)); 3246 strlcpy(neg_fec, ice_negotiated_fec_mode(hw->port_info), sizeof(neg_fec)); 3247 3248 error = sysctl_handle_string(oidp, neg_fec, 0, req); 3249 if (req->newptr != NULL) 3250 return (EPERM); 3251 3252 return (error); 3253 } 3254 3255 #define ICE_SYSCTL_HELP_FC_CONFIG \ 3256 "\nDisplay or set the port's advertised flow control mode.\n" \ 3257 "\t0 - " ICE_FC_STRING_NONE \ 3258 "\n\t1 - " ICE_FC_STRING_RX \ 3259 "\n\t2 - " ICE_FC_STRING_TX \ 3260 "\n\t3 - " ICE_FC_STRING_FULL \ 3261 "\nEither the numbers or the strings above can be used to set the advertised mode." 3262 3263 /** 3264 * ice_sysctl_fc_config - Display/change the advertised flow control mode 3265 * @oidp: sysctl oid structure 3266 * @arg1: pointer to private data structure 3267 * @arg2: unused 3268 * @req: sysctl request pointer 3269 * 3270 * On read: Displays the configured flow control mode 3271 * On write: Sets the device's flow control mode to the input, if it's valid. 
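 * For instance (illustrative), writing either the number 3 or the
 * corresponding mode string requests full flow control, i.e. advertising both
 * Rx and Tx pause frames, while 0 disables pause advertisement entirely.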
3272 * Valid input strings: see ICE_SYSCTL_HELP_FC_CONFIG 3273 */ 3274 static int 3275 ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS) 3276 { 3277 struct ice_softc *sc = (struct ice_softc *)arg1; 3278 struct ice_port_info *pi = sc->hw.port_info; 3279 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3280 enum ice_fc_mode old_mode, new_mode; 3281 struct ice_hw *hw = &sc->hw; 3282 device_t dev = sc->dev; 3283 enum ice_status status; 3284 int error = 0, fc_num; 3285 bool mode_set = false; 3286 struct sbuf buf; 3287 char *fc_str_end; 3288 char fc_str[32]; 3289 u8 aq_failures; 3290 3291 UNREFERENCED_PARAMETER(arg2); 3292 3293 if (ice_driver_is_detaching(sc)) 3294 return (ESHUTDOWN); 3295 3296 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 3297 &pcaps, NULL); 3298 if (status != ICE_SUCCESS) { 3299 device_printf(dev, 3300 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3301 __func__, ice_status_str(status), 3302 ice_aq_str(hw->adminq.sq_last_status)); 3303 return (EIO); 3304 } 3305 3306 /* Convert HW response format to SW enum value */ 3307 if ((pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && 3308 (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) 3309 old_mode = ICE_FC_FULL; 3310 else if (pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3311 old_mode = ICE_FC_TX_PAUSE; 3312 else if (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3313 old_mode = ICE_FC_RX_PAUSE; 3314 else 3315 old_mode = ICE_FC_NONE; 3316 3317 /* Create "old" string for output */ 3318 bzero(fc_str, sizeof(fc_str)); 3319 sbuf_new_for_sysctl(&buf, fc_str, sizeof(fc_str), req); 3320 sbuf_printf(&buf, "%d<%s>", old_mode, ice_fc_str(old_mode)); 3321 sbuf_finish(&buf); 3322 sbuf_delete(&buf); 3323 3324 error = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req); 3325 if ((error) || (req->newptr == NULL)) 3326 return (error); 3327 3328 /* Try to parse input as a string, first */ 3329 if (strcasecmp(ice_fc_str(ICE_FC_FULL), fc_str) == 0) { 3330 new_mode = ICE_FC_FULL; 3331 mode_set = true; 3332 } 3333 else if (strcasecmp(ice_fc_str(ICE_FC_TX_PAUSE), fc_str) == 0) { 3334 new_mode = ICE_FC_TX_PAUSE; 3335 mode_set = true; 3336 } 3337 else if (strcasecmp(ice_fc_str(ICE_FC_RX_PAUSE), fc_str) == 0) { 3338 new_mode = ICE_FC_RX_PAUSE; 3339 mode_set = true; 3340 } 3341 else if (strcasecmp(ice_fc_str(ICE_FC_NONE), fc_str) == 0) { 3342 new_mode = ICE_FC_NONE; 3343 mode_set = true; 3344 } 3345 3346 /* 3347 * Then check if it's an integer, for compatibility with the method 3348 * used in older drivers. 3349 */ 3350 if (!mode_set) { 3351 fc_num = strtol(fc_str, &fc_str_end, 0); 3352 if (fc_str_end == fc_str) 3353 fc_num = -1; 3354 switch (fc_num) { 3355 case 3: 3356 new_mode = ICE_FC_FULL; 3357 break; 3358 case 2: 3359 new_mode = ICE_FC_TX_PAUSE; 3360 break; 3361 case 1: 3362 new_mode = ICE_FC_RX_PAUSE; 3363 break; 3364 case 0: 3365 new_mode = ICE_FC_NONE; 3366 break; 3367 default: 3368 device_printf(dev, 3369 "%s: \"%s\" is not a valid flow control mode\n", 3370 __func__, fc_str); 3371 return (EINVAL); 3372 } 3373 } 3374 3375 /* Finally, set the flow control mode in FW */ 3376 hw->port_info->fc.req_mode = new_mode; 3377 status = ice_set_fc(pi, &aq_failures, true); 3378 if (status != ICE_SUCCESS) { 3379 /* Don't indicate failure if there's no media in the port -- the sysctl 3380 * handler has saved the value and will apply it when media is inserted. 
3381 */ 3382 if (aq_failures == ICE_SET_FC_AQ_FAIL_SET && 3383 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 3384 device_printf(dev, 3385 "%s: Setting will be applied when media is inserted\n", __func__); 3386 return (0); 3387 } else { 3388 device_printf(dev, 3389 "%s: ice_set_fc AQ failure = %d\n", __func__, aq_failures); 3390 return (EIO); 3391 } 3392 } 3393 3394 return (0); 3395 } 3396 3397 /** 3398 * ice_sysctl_negotiated_fc - Display currently negotiated FC mode 3399 * @oidp: sysctl oid structure 3400 * @arg1: pointer to private data structure 3401 * @arg2: unused 3402 * @req: sysctl request pointer 3403 * 3404 * On read: Displays the currently negotiated flow control settings. 3405 * 3406 * If link is not established, this will report ICE_FC_NONE, as no flow 3407 * control is negotiated while link is down. 3408 */ 3409 static int 3410 ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS) 3411 { 3412 struct ice_softc *sc = (struct ice_softc *)arg1; 3413 struct ice_port_info *pi = sc->hw.port_info; 3414 const char *negotiated_fc; 3415 3416 UNREFERENCED_PARAMETER(arg2); 3417 3418 if (ice_driver_is_detaching(sc)) 3419 return (ESHUTDOWN); 3420 3421 negotiated_fc = ice_flowcontrol_mode(pi); 3422 3423 return sysctl_handle_string(oidp, __DECONST(char *, negotiated_fc), 0, req); 3424 } 3425 3426 /** 3427 * __ice_sysctl_phy_type_handler - Display/change supported PHY types/speeds 3428 * @oidp: sysctl oid structure 3429 * @arg1: pointer to private data structure 3430 * @arg2: unused 3431 * @req: sysctl request pointer 3432 * @is_phy_type_high: if true, handle the high PHY type instead of the low PHY type 3433 * 3434 * Private handler for phy_type_high and phy_type_low sysctls. 3435 */ 3436 static int 3437 __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high) 3438 { 3439 struct ice_softc *sc = (struct ice_softc *)arg1; 3440 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3441 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3442 struct ice_hw *hw = &sc->hw; 3443 device_t dev = sc->dev; 3444 enum ice_status status; 3445 uint64_t types; 3446 int error = 0; 3447 3448 UNREFERENCED_PARAMETER(arg2); 3449 3450 if (ice_driver_is_detaching(sc)) 3451 return (ESHUTDOWN); 3452 3453 status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_SW_CFG, 3454 &pcaps, NULL); 3455 if (status != ICE_SUCCESS) { 3456 device_printf(dev, 3457 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3458 __func__, ice_status_str(status), 3459 ice_aq_str(hw->adminq.sq_last_status)); 3460 return (EIO); 3461 } 3462 3463 if (is_phy_type_high) 3464 types = pcaps.phy_type_high; 3465 else 3466 types = pcaps.phy_type_low; 3467 3468 error = sysctl_handle_64(oidp, &types, sizeof(types), req); 3469 if ((error) || (req->newptr == NULL)) 3470 return (error); 3471 3472 ice_copy_phy_caps_to_cfg(hw->port_info, &pcaps, &cfg); 3473 3474 if (is_phy_type_high) 3475 cfg.phy_type_high = types & hw->port_info->phy.phy_type_high; 3476 else 3477 cfg.phy_type_low = types & hw->port_info->phy.phy_type_low; 3478 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3479 3480 status = ice_aq_set_phy_cfg(hw, hw->port_info, &cfg, NULL); 3481 if (status != ICE_SUCCESS) { 3482 device_printf(dev, 3483 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 3484 __func__, ice_status_str(status), 3485 ice_aq_str(hw->adminq.sq_last_status)); 3486 return (EIO); 3487 } 3488 3489 return (0); 3490 3491 } 3492 3493 /** 3494 * ice_sysctl_phy_type_low - Display/change supported lower PHY types/speeds 3495 * @oidp: sysctl oid structure 3496 * @arg1: pointer 
to private data structure 3497 * @arg2: unused 3498 * @req: sysctl request pointer 3499 * 3500 * On read: Displays the currently supported lower PHY types 3501 * On write: Sets the device's supported low PHY types 3502 */ 3503 static int 3504 ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS) 3505 { 3506 return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, false); 3507 } 3508 3509 /** 3510 * ice_sysctl_phy_type_high - Display/change supported higher PHY types/speeds 3511 * @oidp: sysctl oid structure 3512 * @arg1: pointer to private data structure 3513 * @arg2: unused 3514 * @req: sysctl request pointer 3515 * 3516 * On read: Displays the currently supported higher PHY types 3517 * On write: Sets the device's supported high PHY types 3518 */ 3519 static int 3520 ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS) 3521 { 3522 return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, true); 3523 } 3524 3525 /** 3526 * ice_sysctl_phy_caps - Display response from Get PHY abililties 3527 * @oidp: sysctl oid structure 3528 * @arg1: pointer to private data structure 3529 * @arg2: unused 3530 * @req: sysctl request pointer 3531 * @report_mode: the mode to report 3532 * 3533 * On read: Display the response from Get PHY abillities with the given report 3534 * mode. 3535 */ 3536 static int 3537 ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode) 3538 { 3539 struct ice_softc *sc = (struct ice_softc *)arg1; 3540 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3541 struct ice_hw *hw = &sc->hw; 3542 struct ice_port_info *pi = hw->port_info; 3543 device_t dev = sc->dev; 3544 enum ice_status status; 3545 int error; 3546 3547 UNREFERENCED_PARAMETER(arg2); 3548 3549 error = priv_check(curthread, PRIV_DRIVER); 3550 if (error) 3551 return (error); 3552 3553 if (ice_driver_is_detaching(sc)) 3554 return (ESHUTDOWN); 3555 3556 status = ice_aq_get_phy_caps(pi, true, report_mode, &pcaps, NULL); 3557 if (status != ICE_SUCCESS) { 3558 device_printf(dev, 3559 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3560 __func__, ice_status_str(status), 3561 ice_aq_str(hw->adminq.sq_last_status)); 3562 return (EIO); 3563 } 3564 3565 error = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req); 3566 if (req->newptr != NULL) 3567 return (EPERM); 3568 3569 return (error); 3570 } 3571 3572 /** 3573 * ice_sysctl_phy_sw_caps - Display response from Get PHY abililties 3574 * @oidp: sysctl oid structure 3575 * @arg1: pointer to private data structure 3576 * @arg2: unused 3577 * @req: sysctl request pointer 3578 * 3579 * On read: Display the response from Get PHY abillities reporting the last 3580 * software configuration. 3581 */ 3582 static int 3583 ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS) 3584 { 3585 return ice_sysctl_phy_caps(oidp, arg1, arg2, req, 3586 ICE_AQC_REPORT_SW_CFG); 3587 } 3588 3589 /** 3590 * ice_sysctl_phy_nvm_caps - Display response from Get PHY abililties 3591 * @oidp: sysctl oid structure 3592 * @arg1: pointer to private data structure 3593 * @arg2: unused 3594 * @req: sysctl request pointer 3595 * 3596 * On read: Display the response from Get PHY abillities reporting the NVM 3597 * configuration. 
3598 */ 3599 static int 3600 ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS) 3601 { 3602 return ice_sysctl_phy_caps(oidp, arg1, arg2, req, 3603 ICE_AQC_REPORT_NVM_CAP); 3604 } 3605 3606 /** 3607 * ice_sysctl_phy_topo_caps - Display response from Get PHY abilities 3608 * @oidp: sysctl oid structure 3609 * @arg1: pointer to private data structure 3610 * @arg2: unused 3611 * @req: sysctl request pointer 3612 * 3613 * On read: Display the response from Get PHY abilities reporting the 3614 * topology configuration. 3615 */ 3616 static int 3617 ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS) 3618 { 3619 return ice_sysctl_phy_caps(oidp, arg1, arg2, req, 3620 ICE_AQC_REPORT_TOPO_CAP); 3621 } 3622 3623 /** 3624 * ice_sysctl_phy_link_status - Display response from Get Link Status 3625 * @oidp: sysctl oid structure 3626 * @arg1: pointer to private data structure 3627 * @arg2: unused 3628 * @req: sysctl request pointer 3629 * 3630 * On read: Display the response from firmware for the Get Link Status 3631 * request. 3632 */ 3633 static int 3634 ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS) 3635 { 3636 struct ice_aqc_get_link_status_data link_data = { 0 }; 3637 struct ice_softc *sc = (struct ice_softc *)arg1; 3638 struct ice_hw *hw = &sc->hw; 3639 struct ice_port_info *pi = hw->port_info; 3640 struct ice_aqc_get_link_status *resp; 3641 struct ice_aq_desc desc; 3642 device_t dev = sc->dev; 3643 enum ice_status status; 3644 int error; 3645 3646 UNREFERENCED_PARAMETER(arg2); 3647 3648 /* 3649 * Ensure that only contexts with driver privilege are allowed to 3650 * access this information 3651 */ 3652 error = priv_check(curthread, PRIV_DRIVER); 3653 if (error) 3654 return (error); 3655 3656 if (ice_driver_is_detaching(sc)) 3657 return (ESHUTDOWN); 3658 3659 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); 3660 resp = &desc.params.get_link_status; 3661 resp->lport_num = pi->lport; 3662 3663 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), NULL); 3664 if (status != ICE_SUCCESS) { 3665 device_printf(dev, 3666 "%s: ice_aq_send_cmd failed; status %s, aq_err %s\n", 3667 __func__, ice_status_str(status), 3668 ice_aq_str(hw->adminq.sq_last_status)); 3669 return (EIO); 3670 } 3671 3672 error = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req); 3673 if (req->newptr != NULL) 3674 return (EPERM); 3675 3676 return (error); 3677 } 3678 3679 /** 3680 * ice_sysctl_fw_cur_lldp_persist_status - Display current FW LLDP status 3681 * @oidp: sysctl oid structure 3682 * @arg1: pointer to private softc structure 3683 * @arg2: unused 3684 * @req: sysctl request pointer 3685 * 3686 * On read: Displays current persistent LLDP status.
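 *
 * The numeric admin status returned by ice_get_cur_lldp_persist_status() is
 * translated into a string by ice_fw_lldp_status() before being reported.
 * The same query also feeds ice_sysctl_fw_lldp_agent() below, which uses the
 * current (or, if unset, default) persistent status to decide whether the
 * agent is enabled.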
3687 */ 3688 static int 3689 ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS) 3690 { 3691 struct ice_softc *sc = (struct ice_softc *)arg1; 3692 struct ice_hw *hw = &sc->hw; 3693 device_t dev = sc->dev; 3694 enum ice_status status; 3695 struct sbuf *sbuf; 3696 u32 lldp_state; 3697 3698 UNREFERENCED_PARAMETER(arg2); 3699 UNREFERENCED_PARAMETER(oidp); 3700 3701 if (ice_driver_is_detaching(sc)) 3702 return (ESHUTDOWN); 3703 3704 status = ice_get_cur_lldp_persist_status(hw, &lldp_state); 3705 if (status) { 3706 device_printf(dev, 3707 "Could not acquire current LLDP persistence status, err %s aq_err %s\n", 3708 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3709 return (EIO); 3710 } 3711 3712 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3713 sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); 3714 sbuf_finish(sbuf); 3715 sbuf_delete(sbuf); 3716 3717 return (0); 3718 } 3719 3720 /** 3721 * ice_sysctl_fw_dflt_lldp_persist_status - Display default FW LLDP status 3722 * @oidp: sysctl oid structure 3723 * @arg1: pointer to private softc structure 3724 * @arg2: unused 3725 * @req: sysctl request pointer 3726 * 3727 * On read: Displays default persistent LLDP status. 3728 */ 3729 static int 3730 ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS) 3731 { 3732 struct ice_softc *sc = (struct ice_softc *)arg1; 3733 struct ice_hw *hw = &sc->hw; 3734 device_t dev = sc->dev; 3735 enum ice_status status; 3736 struct sbuf *sbuf; 3737 u32 lldp_state; 3738 3739 UNREFERENCED_PARAMETER(arg2); 3740 UNREFERENCED_PARAMETER(oidp); 3741 3742 if (ice_driver_is_detaching(sc)) 3743 return (ESHUTDOWN); 3744 3745 status = ice_get_dflt_lldp_persist_status(hw, &lldp_state); 3746 if (status) { 3747 device_printf(dev, 3748 "Could not acquire default LLDP persistence status, err %s aq_err %s\n", 3749 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3750 return (EIO); 3751 } 3752 3753 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3754 sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); 3755 sbuf_finish(sbuf); 3756 sbuf_delete(sbuf); 3757 3758 return (0); 3759 } 3760 3761 #define ICE_SYSCTL_HELP_FW_LLDP_AGENT \ 3762 "\nDisplay or change FW LLDP agent state:" \ 3763 "\n\t0 - disabled" \ 3764 "\n\t1 - enabled" 3765 3766 /** 3767 * ice_sysctl_fw_lldp_agent - Display or change the FW LLDP agent status 3768 * @oidp: sysctl oid structure 3769 * @arg1: pointer to private softc structure 3770 * @arg2: unused 3771 * @req: sysctl request pointer 3772 * 3773 * On read: Displays whether the FW LLDP agent is running 3774 * On write: Persistently enables or disables the FW LLDP agent 3775 */ 3776 static int 3777 ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS) 3778 { 3779 struct ice_softc *sc = (struct ice_softc *)arg1; 3780 struct ice_hw *hw = &sc->hw; 3781 device_t dev = sc->dev; 3782 enum ice_status status; 3783 int error = 0; 3784 u32 old_state; 3785 u8 fw_lldp_enabled; 3786 bool retried_start_lldp = false; 3787 3788 UNREFERENCED_PARAMETER(arg2); 3789 3790 if (ice_driver_is_detaching(sc)) 3791 return (ESHUTDOWN); 3792 3793 status = ice_get_cur_lldp_persist_status(hw, &old_state); 3794 if (status) { 3795 device_printf(dev, 3796 "Could not acquire current LLDP persistence status, err %s aq_err %s\n", 3797 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3798 return (EIO); 3799 } 3800 3801 if (old_state > ICE_LLDP_ADMINSTATUS_ENA_RXTX) { 3802 status = ice_get_dflt_lldp_persist_status(hw, &old_state); 3803 if (status) { 3804 device_printf(dev, 3805 "Could not 
acquire default LLDP persistence status, err %s aq_err %s\n", 3806 ice_status_str(status), 3807 ice_aq_str(hw->adminq.sq_last_status)); 3808 return (EIO); 3809 } 3810 } 3811 if (old_state == 0) 3812 fw_lldp_enabled = false; 3813 else 3814 fw_lldp_enabled = true; 3815 3816 error = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req); 3817 if ((error) || (req->newptr == NULL)) 3818 return (error); 3819 3820 if (old_state == 0 && fw_lldp_enabled == false) 3821 return (0); 3822 3823 if (old_state != 0 && fw_lldp_enabled == true) 3824 return (0); 3825 3826 if (fw_lldp_enabled == false) { 3827 status = ice_aq_stop_lldp(hw, true, true, NULL); 3828 /* EPERM is returned if the LLDP agent is already shutdown */ 3829 if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM) { 3830 device_printf(dev, 3831 "%s: ice_aq_stop_lldp failed; status %s, aq_err %s\n", 3832 __func__, ice_status_str(status), 3833 ice_aq_str(hw->adminq.sq_last_status)); 3834 return (EIO); 3835 } 3836 ice_aq_set_dcb_parameters(hw, true, NULL); 3837 hw->port_info->qos_cfg.is_sw_lldp = true; 3838 ice_add_rx_lldp_filter(sc); 3839 } else { 3840 ice_del_rx_lldp_filter(sc); 3841 retry_start_lldp: 3842 status = ice_aq_start_lldp(hw, true, NULL); 3843 if (status) { 3844 switch (hw->adminq.sq_last_status) { 3845 /* EEXIST is returned if the LLDP agent is already started */ 3846 case ICE_AQ_RC_EEXIST: 3847 break; 3848 case ICE_AQ_RC_EAGAIN: 3849 /* Retry command after a 2 second wait */ 3850 if (retried_start_lldp == false) { 3851 retried_start_lldp = true; 3852 pause("slldp", ICE_START_LLDP_RETRY_WAIT); 3853 goto retry_start_lldp; 3854 } 3855 /* Fallthrough */ 3856 default: 3857 device_printf(dev, 3858 "%s: ice_aq_start_lldp failed; status %s, aq_err %s\n", 3859 __func__, ice_status_str(status), 3860 ice_aq_str(hw->adminq.sq_last_status)); 3861 return (EIO); 3862 } 3863 } 3864 hw->port_info->qos_cfg.is_sw_lldp = false; 3865 } 3866 3867 return (error); 3868 } 3869 3870 /** 3871 * ice_add_device_sysctls - add device specific dynamic sysctls 3872 * @sc: device private structure 3873 * 3874 * Add per-device dynamic sysctls which show device configuration or enable 3875 * configuring device functionality. For tunable values which can be set prior 3876 * to load, see ice_add_device_tunables. 3877 * 3878 * This function depends on the sysctl layout setup by ice_add_device_tunables, 3879 * and likely should be called near the end of the attach process. 
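 *
 * A minimal sketch of the assumed attach-time ordering (the surrounding
 * attach logic lives outside this file):
 *
 *	ice_add_device_tunables(sc);	// early: sysctl nodes and tunables
 *	... hardware and VSI initialization ...
 *	ice_add_device_sysctls(sc);	// near the end of attach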
3880 */ 3881 void 3882 ice_add_device_sysctls(struct ice_softc *sc) 3883 { 3884 struct sysctl_oid *hw_node; 3885 device_t dev = sc->dev; 3886 3887 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 3888 struct sysctl_oid_list *ctx_list = 3889 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 3890 3891 SYSCTL_ADD_PROC(ctx, ctx_list, 3892 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, 3893 sc, 0, ice_sysctl_show_fw, "A", "Firmware version"); 3894 3895 SYSCTL_ADD_PROC(ctx, ctx_list, 3896 OID_AUTO, "pba_number", CTLTYPE_STRING | CTLFLAG_RD, 3897 sc, 0, ice_sysctl_pba_number, "A", "Product Board Assembly Number"); 3898 3899 SYSCTL_ADD_PROC(ctx, ctx_list, 3900 OID_AUTO, "ddp_version", CTLTYPE_STRING | CTLFLAG_RD, 3901 sc, 0, ice_sysctl_pkg_version, "A", "Active DDP package name and version"); 3902 3903 SYSCTL_ADD_PROC(ctx, ctx_list, 3904 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, 3905 sc, 0, ice_sysctl_current_speed, "A", "Current Port Link Speed"); 3906 3907 SYSCTL_ADD_PROC(ctx, ctx_list, 3908 OID_AUTO, "requested_fec", CTLTYPE_STRING | CTLFLAG_RW, 3909 sc, 0, ice_sysctl_fec_config, "A", ICE_SYSCTL_HELP_FEC_CONFIG); 3910 3911 SYSCTL_ADD_PROC(ctx, ctx_list, 3912 OID_AUTO, "negotiated_fec", CTLTYPE_STRING | CTLFLAG_RD, 3913 sc, 0, ice_sysctl_negotiated_fec, "A", "Current Negotiated FEC mode"); 3914 3915 SYSCTL_ADD_PROC(ctx, ctx_list, 3916 OID_AUTO, "fc", CTLTYPE_STRING | CTLFLAG_RW, 3917 sc, 0, ice_sysctl_fc_config, "A", ICE_SYSCTL_HELP_FC_CONFIG); 3918 3919 SYSCTL_ADD_PROC(ctx, ctx_list, 3920 OID_AUTO, "advertise_speed", CTLTYPE_U16 | CTLFLAG_RW, 3921 sc, 0, ice_sysctl_advertise_speed, "SU", ICE_SYSCTL_HELP_ADVERTISE_SPEED); 3922 3923 SYSCTL_ADD_PROC(ctx, ctx_list, 3924 OID_AUTO, "fw_lldp_agent", CTLTYPE_U8 | CTLFLAG_RWTUN, 3925 sc, 0, ice_sysctl_fw_lldp_agent, "CU", ICE_SYSCTL_HELP_FW_LLDP_AGENT); 3926 3927 /* Differentiate software and hardware statistics, by keeping hw stats 3928 * in their own node. This isn't in ice_add_device_tunables, because 3929 * we won't have any CTLFLAG_TUN sysctls under this node. 3930 */ 3931 hw_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "hw", CTLFLAG_RD, 3932 NULL, "Port Hardware Statistics"); 3933 3934 ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur); 3935 3936 /* Add the main PF VSI stats now. Other VSIs will add their own stats 3937 * during creation 3938 */ 3939 ice_add_vsi_sysctls(&sc->pf_vsi); 3940 3941 /* Add sysctls related to debugging the device driver. This includes 3942 * sysctls which display additional internal driver state for use in 3943 * understanding what is happening within the driver. 3944 */ 3945 ice_add_debug_sysctls(sc); 3946 } 3947 3948 /** 3949 * @enum hmc_error_type 3950 * @brief enumeration of HMC errors 3951 * 3952 * Enumeration defining the possible HMC errors that might occur. 3953 */ 3954 enum hmc_error_type { 3955 HMC_ERR_PMF_INVALID = 0, 3956 HMC_ERR_VF_IDX_INVALID = 1, 3957 HMC_ERR_VF_PARENT_PF_INVALID = 2, 3958 /* 3 is reserved */ 3959 HMC_ERR_INDEX_TOO_BIG = 4, 3960 HMC_ERR_ADDRESS_TOO_LARGE = 5, 3961 HMC_ERR_SEGMENT_DESC_INVALID = 6, 3962 HMC_ERR_SEGMENT_DESC_TOO_SMALL = 7, 3963 HMC_ERR_PAGE_DESC_INVALID = 8, 3964 HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION = 9, 3965 /* 10 is reserved */ 3966 HMC_ERR_INVALID_OBJECT_TYPE = 11, 3967 /* 12 is reserved */ 3968 }; 3969 3970 /** 3971 * ice_log_hmc_error - Log an HMC error message 3972 * @hw: device hw structure 3973 * @dev: the device to pass to device_printf() 3974 * 3975 * Log a message when an HMC error interrupt is triggered. 
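 *
 * HMC errors are among the "other" interrupt causes enabled by
 * ice_configure_misc_interrupts() below (PFINT_OICR_HMC_ERR_M). The routine
 * decodes PFHMC_ERRORINFO/PFHMC_ERRORDATA and then clears PFHMC_ERRORINFO so
 * that a subsequent error can be latched and reported.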
3976 */ 3977 void 3978 ice_log_hmc_error(struct ice_hw *hw, device_t dev) 3979 { 3980 u32 info, data; 3981 u8 index, errtype, objtype; 3982 bool isvf; 3983 3984 info = rd32(hw, PFHMC_ERRORINFO); 3985 data = rd32(hw, PFHMC_ERRORDATA); 3986 3987 index = (u8)(info & PFHMC_ERRORINFO_PMF_INDEX_M); 3988 errtype = (u8)((info & PFHMC_ERRORINFO_HMC_ERROR_TYPE_M) >> 3989 PFHMC_ERRORINFO_HMC_ERROR_TYPE_S); 3990 objtype = (u8)((info & PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M) >> 3991 PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S); 3992 3993 isvf = info & PFHMC_ERRORINFO_PMF_ISVF_M; 3994 3995 device_printf(dev, "%s HMC Error detected on PMF index %d:\n", 3996 isvf ? "VF" : "PF", index); 3997 3998 device_printf(dev, "error type %d, object type %d, data 0x%08x\n", 3999 errtype, objtype, data); 4000 4001 switch (errtype) { 4002 case HMC_ERR_PMF_INVALID: 4003 device_printf(dev, "Private Memory Function is not valid\n"); 4004 break; 4005 case HMC_ERR_VF_IDX_INVALID: 4006 device_printf(dev, "Invalid Private Memory Function index for PE enabled VF\n"); 4007 break; 4008 case HMC_ERR_VF_PARENT_PF_INVALID: 4009 device_printf(dev, "Invalid parent PF for PE enabled VF\n"); 4010 break; 4011 case HMC_ERR_INDEX_TOO_BIG: 4012 device_printf(dev, "Object index too big\n"); 4013 break; 4014 case HMC_ERR_ADDRESS_TOO_LARGE: 4015 device_printf(dev, "Address extends beyond segment descriptor limit\n"); 4016 break; 4017 case HMC_ERR_SEGMENT_DESC_INVALID: 4018 device_printf(dev, "Segment descriptor is invalid\n"); 4019 break; 4020 case HMC_ERR_SEGMENT_DESC_TOO_SMALL: 4021 device_printf(dev, "Segment descriptor is too small\n"); 4022 break; 4023 case HMC_ERR_PAGE_DESC_INVALID: 4024 device_printf(dev, "Page descriptor is invalid\n"); 4025 break; 4026 case HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION: 4027 device_printf(dev, "Unsupported Request completion received from PCIe\n"); 4028 break; 4029 case HMC_ERR_INVALID_OBJECT_TYPE: 4030 device_printf(dev, "Invalid object type\n"); 4031 break; 4032 default: 4033 device_printf(dev, "Unknown HMC error\n"); 4034 } 4035 4036 /* Clear the error indication */ 4037 wr32(hw, PFHMC_ERRORINFO, 0); 4038 } 4039 4040 /** 4041 * @struct ice_sysctl_info 4042 * @brief sysctl information 4043 * 4044 * Structure used to simplify the process of defining the many similar 4045 * statistics sysctls. 4046 */ 4047 struct ice_sysctl_info { 4048 u64 *stat; 4049 const char *name; 4050 const char *description; 4051 }; 4052 4053 /** 4054 * ice_add_sysctls_eth_stats - Add sysctls for ethernet statistics 4055 * @ctx: sysctl ctx to use 4056 * @parent: the parent node to add sysctls under 4057 * @stats: the ethernet stats structure to source values from 4058 * 4059 * Adds statistics sysctls for the ethernet statistics of the MAC or a VSI. 4060 * Will add them under the parent node specified. 4061 * 4062 * Note that tx_errors is only meaningful for VSIs and not the global MAC/PF 4063 * statistics, so it is not included here. Similarly, rx_discards has different 4064 * descriptions for VSIs and MAC/PF stats, so it is also not included here. 
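 *
 * For example, both callers in this file pass their own parent node and
 * stats block:
 *
 *	ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur);
 *	ice_add_sysctls_eth_stats(ctx, mac_node, &stats->eth);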
4065 */ 4066 void 4067 ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, 4068 struct sysctl_oid *parent, 4069 struct ice_eth_stats *stats) 4070 { 4071 const struct ice_sysctl_info ctls[] = { 4072 /* Rx Stats */ 4073 { &stats->rx_bytes, "good_octets_rcvd", "Good Octets Received" }, 4074 { &stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received" }, 4075 { &stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received" }, 4076 { &stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received" }, 4077 /* Tx Stats */ 4078 { &stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted" }, 4079 { &stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted" }, 4080 { &stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted" }, 4081 { &stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted" }, 4082 /* End */ 4083 { 0, 0, 0 } 4084 }; 4085 4086 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4087 4088 const struct ice_sysctl_info *entry = ctls; 4089 while (entry->stat != 0) { 4090 SYSCTL_ADD_U64(ctx, parent_list, OID_AUTO, entry->name, 4091 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 4092 entry->description); 4093 entry++; 4094 } 4095 } 4096 4097 /** 4098 * ice_sysctl_tx_cso_stat - Display Tx checksum offload statistic 4099 * @oidp: sysctl oid structure 4100 * @arg1: pointer to private data structure 4101 * @arg2: Tx CSO stat to read 4102 * @req: sysctl request pointer 4103 * 4104 * On read: Sums the per-queue Tx CSO stat and displays it. 4105 */ 4106 static int 4107 ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS) 4108 { 4109 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4110 enum ice_tx_cso_stat type = (enum ice_tx_cso_stat)arg2; 4111 u64 stat = 0; 4112 int i; 4113 4114 if (ice_driver_is_detaching(vsi->sc)) 4115 return (ESHUTDOWN); 4116 4117 /* Check that the type is valid */ 4118 if (type >= ICE_CSO_STAT_TX_COUNT) 4119 return (EDOOFUS); 4120 4121 /* Sum the stat for each of the Tx queues */ 4122 for (i = 0; i < vsi->num_tx_queues; i++) 4123 stat += vsi->tx_queues[i].stats.cso[type]; 4124 4125 return sysctl_handle_64(oidp, NULL, stat, req); 4126 } 4127 4128 /** 4129 * ice_sysctl_rx_cso_stat - Display Rx checksum offload statistic 4130 * @oidp: sysctl oid structure 4131 * @arg1: pointer to private data structure 4132 * @arg2: Rx CSO stat to read 4133 * @req: sysctl request pointer 4134 * 4135 * On read: Sums the per-queue Rx CSO stat and displays it. 4136 */ 4137 static int 4138 ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS) 4139 { 4140 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4141 enum ice_rx_cso_stat type = (enum ice_rx_cso_stat)arg2; 4142 u64 stat = 0; 4143 int i; 4144 4145 if (ice_driver_is_detaching(vsi->sc)) 4146 return (ESHUTDOWN); 4147 4148 /* Check that the type is valid */ 4149 if (type >= ICE_CSO_STAT_RX_COUNT) 4150 return (EDOOFUS); 4151 4152 /* Sum the stat for each of the Rx queues */ 4153 for (i = 0; i < vsi->num_rx_queues; i++) 4154 stat += vsi->rx_queues[i].stats.cso[type]; 4155 4156 return sysctl_handle_64(oidp, NULL, stat, req); 4157 } 4158 4159 /** 4160 * @struct ice_rx_cso_stat_info 4161 * @brief sysctl information for an Rx checksum offload statistic 4162 * 4163 * Structure used to simplify the process of defining the checksum offload 4164 * statistics. 
4165 */ 4166 struct ice_rx_cso_stat_info { 4167 enum ice_rx_cso_stat type; 4168 const char *name; 4169 const char *description; 4170 }; 4171 4172 /** 4173 * @struct ice_tx_cso_stat_info 4174 * @brief sysctl information for a Tx checksum offload statistic 4175 * 4176 * Structure used to simplify the process of defining the checksum offload 4177 * statistics. 4178 */ 4179 struct ice_tx_cso_stat_info { 4180 enum ice_tx_cso_stat type; 4181 const char *name; 4182 const char *description; 4183 }; 4184 4185 /** 4186 * ice_add_sysctls_sw_stats - Add sysctls for software statistics 4187 * @vsi: pointer to the VSI to add sysctls for 4188 * @ctx: sysctl ctx to use 4189 * @parent: the parent node to add sysctls under 4190 * 4191 * Add statistics sysctls for software tracked statistics of a VSI. 4192 * 4193 * Currently this only adds checksum offload statistics, but more counters may 4194 * be added in the future. 4195 */ 4196 static void 4197 ice_add_sysctls_sw_stats(struct ice_vsi *vsi, 4198 struct sysctl_ctx_list *ctx, 4199 struct sysctl_oid *parent) 4200 { 4201 struct sysctl_oid *cso_node; 4202 struct sysctl_oid_list *cso_list; 4203 4204 /* Tx CSO Stats */ 4205 const struct ice_tx_cso_stat_info tx_ctls[] = { 4206 { ICE_CSO_STAT_TX_TCP, "tx_tcp", "Transmit TCP Packets marked for HW checksum" }, 4207 { ICE_CSO_STAT_TX_UDP, "tx_udp", "Transmit UDP Packets marked for HW checksum" }, 4208 { ICE_CSO_STAT_TX_SCTP, "tx_sctp", "Transmit SCTP Packets marked for HW checksum" }, 4209 { ICE_CSO_STAT_TX_IP4, "tx_ip4", "Transmit IPv4 Packets marked for HW checksum" }, 4210 { ICE_CSO_STAT_TX_IP6, "tx_ip6", "Transmit IPv6 Packets marked for HW checksum" }, 4211 { ICE_CSO_STAT_TX_L3_ERR, "tx_l3_err", "Transmit packets that driver failed to set L3 HW CSO bits for" }, 4212 { ICE_CSO_STAT_TX_L4_ERR, "tx_l4_err", "Transmit packets that driver failed to set L4 HW CSO bits for" }, 4213 /* End */ 4214 { ICE_CSO_STAT_TX_COUNT, 0, 0 } 4215 }; 4216 4217 /* Rx CSO Stats */ 4218 const struct ice_rx_cso_stat_info rx_ctls[] = { 4219 { ICE_CSO_STAT_RX_IP4_ERR, "rx_ip4_err", "Received packets with invalid IPv4 checksum indicated by HW" }, 4220 { ICE_CSO_STAT_RX_IP6_ERR, "rx_ip6_err", "Received IPv6 packets with extension headers" }, 4221 { ICE_CSO_STAT_RX_L3_ERR, "rx_l3_err", "Received packets with an unexpected invalid L3 checksum indicated by HW" }, 4222 { ICE_CSO_STAT_RX_TCP_ERR, "rx_tcp_err", "Received packets with invalid TCP checksum indicated by HW" }, 4223 { ICE_CSO_STAT_RX_UDP_ERR, "rx_udp_err", "Received packets with invalid UDP checksum indicated by HW" }, 4224 { ICE_CSO_STAT_RX_SCTP_ERR, "rx_sctp_err", "Received packets with invalid SCTP checksum indicated by HW" }, 4225 { ICE_CSO_STAT_RX_L4_ERR, "rx_l4_err", "Received packets with an unexpected invalid L4 checksum indicated by HW" }, 4226 /* End */ 4227 { ICE_CSO_STAT_RX_COUNT, 0, 0 } 4228 }; 4229 4230 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4231 4232 /* Add a node for statistics tracked by software. 
*/ 4233 cso_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "cso", CTLFLAG_RD, 4234 NULL, "Checksum offload Statistics"); 4235 cso_list = SYSCTL_CHILDREN(cso_node); 4236 4237 const struct ice_tx_cso_stat_info *tx_entry = tx_ctls; 4238 while (tx_entry->name && tx_entry->description) { 4239 SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, tx_entry->name, 4240 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, 4241 vsi, tx_entry->type, ice_sysctl_tx_cso_stat, "QU", 4242 tx_entry->description); 4243 tx_entry++; 4244 } 4245 4246 const struct ice_rx_cso_stat_info *rx_entry = rx_ctls; 4247 while (rx_entry->name && rx_entry->description) { 4248 SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, rx_entry->name, 4249 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, 4250 vsi, rx_entry->type, ice_sysctl_rx_cso_stat, "QU", 4251 rx_entry->description); 4252 rx_entry++; 4253 } 4254 } 4255 4256 /** 4257 * ice_add_vsi_sysctls - Add sysctls for a VSI 4258 * @vsi: pointer to VSI structure 4259 * 4260 * Add various sysctls for a given VSI. 4261 */ 4262 void 4263 ice_add_vsi_sysctls(struct ice_vsi *vsi) 4264 { 4265 struct sysctl_ctx_list *ctx = &vsi->ctx; 4266 struct sysctl_oid *hw_node, *sw_node; 4267 struct sysctl_oid_list *vsi_list, *hw_list, *sw_list; 4268 4269 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 4270 4271 /* Keep hw stats in their own node. */ 4272 hw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "hw", CTLFLAG_RD, 4273 NULL, "VSI Hardware Statistics"); 4274 hw_list = SYSCTL_CHILDREN(hw_node); 4275 4276 /* Add the ethernet statistics for this VSI */ 4277 ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur); 4278 4279 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_discards", 4280 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards, 4281 0, "Discarded Rx Packets (see rx_errors or rx_no_desc)"); 4282 4283 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_errors", 4284 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_errors, 4285 0, "Rx Packets Discarded Due To Error"); 4286 4287 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_no_desc", 4288 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_no_desc, 4289 0, "Rx Packets Discarded Due To Lack Of Descriptors"); 4290 4291 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "tx_errors", 4292 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.tx_errors, 4293 0, "Tx Packets Discarded Due To Error"); 4294 4295 /* Add a node for statistics tracked by software. */ 4296 sw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "sw", CTLFLAG_RD, 4297 NULL, "VSI Software Statistics"); 4298 sw_list = SYSCTL_CHILDREN(sw_node); 4299 4300 ice_add_sysctls_sw_stats(vsi, ctx, sw_node); 4301 } 4302 4303 /** 4304 * ice_add_sysctls_mac_stats - Add sysctls for global MAC statistics 4305 * @ctx: the sysctl ctx to use 4306 * @parent: parent node to add the sysctls under 4307 * @stats: the hw ports stat structure to pull values from 4308 * 4309 * Add global MAC statistics sysctls. 
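 *
 * For example, ice_add_device_sysctls() above attaches these statistics under
 * the per-device "hw" node:
 *
 *	ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur);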
4310 */ 4311 void 4312 ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, 4313 struct sysctl_oid *parent, 4314 struct ice_hw_port_stats *stats) 4315 { 4316 struct sysctl_oid *mac_node; 4317 struct sysctl_oid_list *parent_list, *mac_list; 4318 4319 parent_list = SYSCTL_CHILDREN(parent); 4320 4321 mac_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "mac", CTLFLAG_RD, 4322 NULL, "Mac Hardware Statistics"); 4323 mac_list = SYSCTL_CHILDREN(mac_node); 4324 4325 /* add the common ethernet statistics */ 4326 ice_add_sysctls_eth_stats(ctx, mac_node, &stats->eth); 4327 4328 const struct ice_sysctl_info ctls[] = { 4329 /* Packet Reception Stats */ 4330 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, 4331 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, 4332 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, 4333 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, 4334 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, 4335 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, 4336 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, 4337 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, 4338 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, 4339 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, 4340 {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, 4341 {&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"}, 4342 {&stats->eth.rx_discards, "rx_discards", 4343 "Discarded Rx Packets by Port (shortage of storage space)"}, 4344 /* Packet Transmission Stats */ 4345 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, 4346 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, 4347 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, 4348 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, 4349 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, 4350 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, 4351 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, 4352 {&stats->tx_dropped_link_down, "tx_dropped", "Tx Dropped Due To Link Down"}, 4353 /* Flow control */ 4354 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, 4355 {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, 4356 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, 4357 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, 4358 /* Other */ 4359 {&stats->crc_errors, "crc_errors", "CRC Errors"}, 4360 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, 4361 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, 4362 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, 4363 /* End */ 4364 { 0, 0, 0 } 4365 }; 4366 4367 const struct ice_sysctl_info *entry = ctls; 4368 while (entry->stat != 0) { 4369 SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, entry->name, 4370 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 4371 entry->description); 4372 entry++; 4373 } 4374 } 4375 4376 /** 4377 * ice_configure_misc_interrupts - enable 'other' interrupt causes 4378 * @sc: pointer to device private softc 4379 * 4380 * Enable various "other" interrupt causes, and associate them to interrupt 0, 4381 * which is our administrative interrupt. 
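 *
 * A minimal sketch of how one of these causes is assumed to be consumed by
 * the admin interrupt path (the real handler lives elsewhere in the driver;
 * variable names are illustrative):
 *
 *	u32 oicr = rd32(hw, PFINT_OICR);
 *	if (oicr & PFINT_OICR_HMC_ERR_M)
 *		ice_log_hmc_error(hw, sc->dev);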
4382 */ 4383 void 4384 ice_configure_misc_interrupts(struct ice_softc *sc) 4385 { 4386 struct ice_hw *hw = &sc->hw; 4387 u32 val; 4388 4389 /* Read the OICR register to clear it */ 4390 rd32(hw, PFINT_OICR); 4391 4392 /* Enable useful "other" interrupt causes */ 4393 val = (PFINT_OICR_ECC_ERR_M | 4394 PFINT_OICR_MAL_DETECT_M | 4395 PFINT_OICR_GRST_M | 4396 PFINT_OICR_PCI_EXCEPTION_M | 4397 PFINT_OICR_VFLR_M | 4398 PFINT_OICR_HMC_ERR_M | 4399 PFINT_OICR_PE_CRITERR_M); 4400 4401 wr32(hw, PFINT_OICR_ENA, val); 4402 4403 /* Note that since we're using MSI-X index 0, and ITR index 0, we do 4404 * not explicitly program them when writing to the PFINT_*_CTL 4405 * registers. Nevertheless, these writes are associating the 4406 * interrupts with the ITR 0 vector 4407 */ 4408 4409 /* Associate the OICR interrupt with ITR 0, and enable it */ 4410 wr32(hw, PFINT_OICR_CTL, PFINT_OICR_CTL_CAUSE_ENA_M); 4411 4412 /* Associate the Mailbox interrupt with ITR 0, and enable it */ 4413 wr32(hw, PFINT_MBX_CTL, PFINT_MBX_CTL_CAUSE_ENA_M); 4414 4415 /* Associate the AdminQ interrupt with ITR 0, and enable it */ 4416 wr32(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M); 4417 } 4418 4419 /** 4420 * ice_filter_is_mcast - Check if info is a multicast filter 4421 * @vsi: vsi structure addresses are targeted towards 4422 * @info: filter info 4423 * 4424 * @returns true if the provided info is a multicast filter, and false 4425 * otherwise. 4426 */ 4427 static bool 4428 ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info) 4429 { 4430 const u8 *addr = info->l_data.mac.mac_addr; 4431 4432 /* 4433 * Check if this info matches a multicast filter added by 4434 * ice_add_mac_to_list 4435 */ 4436 if ((info->flag == ICE_FLTR_TX) && 4437 (info->src_id == ICE_SRC_ID_VSI) && 4438 (info->lkup_type == ICE_SW_LKUP_MAC) && 4439 (info->vsi_handle == vsi->idx) && 4440 ETHER_IS_MULTICAST(addr) && !ETHER_IS_BROADCAST(addr)) 4441 return true; 4442 4443 return false; 4444 } 4445 4446 /** 4447 * @struct ice_mcast_sync_data 4448 * @brief data used by ice_sync_one_mcast_filter function 4449 * 4450 * Structure used to store data needed for processing by the 4451 * ice_sync_one_mcast_filter. This structure contains a linked list of filters 4452 * to be added, an error indication, and a pointer to the device softc. 4453 */ 4454 struct ice_mcast_sync_data { 4455 struct ice_list_head add_list; 4456 struct ice_softc *sc; 4457 int err; 4458 }; 4459 4460 /** 4461 * ice_sync_one_mcast_filter - Check if we need to program the filter 4462 * @p: void pointer to algorithm data 4463 * @sdl: link level socket address 4464 * @count: unused count value 4465 * 4466 * Called by if_foreach_llmaddr to operate on each filter in the ifp filter 4467 * list. For the given address, search our internal list to see if we have 4468 * found the filter. If not, add it to our list of filters that need to be 4469 * programmed. 
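 *
 * For example, ice_sync_multicast_filters() below drives this callback over
 * the interface's multicast address list:
 *
 *	if_foreach_llmaddr(sc->ifp, ice_sync_one_mcast_filter, (void *)&data);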
4470 * 4471 * @returns (1) if we've actually set up the filter to be added 4472 */ 4473 static u_int 4474 ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, 4475 u_int __unused count) 4476 { 4477 struct ice_mcast_sync_data *data = (struct ice_mcast_sync_data *)p; 4478 struct ice_softc *sc = data->sc; 4479 struct ice_hw *hw = &sc->hw; 4480 struct ice_switch_info *sw = hw->switch_info; 4481 const u8 *sdl_addr = (const u8 *)LLADDR(sdl); 4482 struct ice_fltr_mgmt_list_entry *itr; 4483 struct ice_list_head *rules; 4484 int err; 4485 4486 rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4487 4488 /* 4489 * If a previous filter already indicated an error, there is no need 4490 * for us to finish processing the rest of the filters. 4491 */ 4492 if (data->err) 4493 return (0); 4494 4495 /* See if this filter has already been programmed */ 4496 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { 4497 struct ice_fltr_info *info = &itr->fltr_info; 4498 const u8 *addr = info->l_data.mac.mac_addr; 4499 4500 /* Only check multicast filters */ 4501 if (!ice_filter_is_mcast(&sc->pf_vsi, info)) 4502 continue; 4503 4504 /* 4505 * If this filter matches, mark the internal filter as 4506 * "found", and exit. 4507 */ 4508 if (bcmp(addr, sdl_addr, ETHER_ADDR_LEN) == 0) { 4509 itr->marker = ICE_FLTR_FOUND; 4510 return (1); 4511 } 4512 } 4513 4514 /* 4515 * If we failed to locate the filter in our internal list, we need to 4516 * place it into our add list. 4517 */ 4518 err = ice_add_mac_to_list(&sc->pf_vsi, &data->add_list, sdl_addr, 4519 ICE_FWD_TO_VSI); 4520 if (err) { 4521 device_printf(sc->dev, 4522 "Failed to place MAC %6D onto add list, err %s\n", 4523 sdl_addr, ":", ice_err_str(err)); 4524 data->err = err; 4525 4526 return (0); 4527 } 4528 4529 return (1); 4530 } 4531 4532 /** 4533 * ice_sync_multicast_filters - Synchronize OS and internal filter list 4534 * @sc: device private structure 4535 * 4536 * Called in response to SIOCDELMULTI to synchronize the operating system 4537 * multicast address list with the internal list of filters programmed to 4538 * firmware. 4539 * 4540 * Works in one phase to find added and deleted filters using a marker bit on 4541 * the internal list. 4542 * 4543 * First, a loop over the internal list clears the marker bit. Second, 4544 * each filter in the ifp list is checked. If we find it in the internal list, 4545 * the marker bit is set. Otherwise, the filter is added to the add list. 4546 * Third, a loop over the internal list determines if any filters have not 4547 * been found. Each of these is added to the delete list. Finally, the add and 4548 * delete lists are programmed to firmware to update the filters. 4549 * 4550 * @returns zero on success or an integer error code on failure.
4551 */ 4552 int 4553 ice_sync_multicast_filters(struct ice_softc *sc) 4554 { 4555 struct ice_hw *hw = &sc->hw; 4556 struct ice_switch_info *sw = hw->switch_info; 4557 struct ice_fltr_mgmt_list_entry *itr; 4558 struct ice_mcast_sync_data data = {}; 4559 struct ice_list_head *rules, remove_list; 4560 enum ice_status status; 4561 int err = 0; 4562 4563 INIT_LIST_HEAD(&data.add_list); 4564 INIT_LIST_HEAD(&remove_list); 4565 data.sc = sc; 4566 data.err = 0; 4567 4568 rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4569 4570 /* Acquire the lock for the entire duration */ 4571 ice_acquire_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4572 4573 /* (1) Reset the marker state for all filters */ 4574 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) 4575 itr->marker = ICE_FLTR_NOT_FOUND; 4576 4577 /* (2) determine which filters need to be added and removed */ 4578 if_foreach_llmaddr(sc->ifp, ice_sync_one_mcast_filter, (void *)&data); 4579 if (data.err) { 4580 /* ice_sync_one_mcast_filter already prints an error */ 4581 err = data.err; 4582 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4583 goto free_filter_lists; 4584 } 4585 4586 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { 4587 struct ice_fltr_info *info = &itr->fltr_info; 4588 const u8 *addr = info->l_data.mac.mac_addr; 4589 4590 /* Only check multicast filters */ 4591 if (!ice_filter_is_mcast(&sc->pf_vsi, info)) 4592 continue; 4593 4594 /* 4595 * If the filter is not marked as found, then it must no 4596 * longer be in the ifp address list, so we need to remove it. 4597 */ 4598 if (itr->marker == ICE_FLTR_NOT_FOUND) { 4599 err = ice_add_mac_to_list(&sc->pf_vsi, &remove_list, 4600 addr, ICE_FWD_TO_VSI); 4601 if (err) { 4602 device_printf(sc->dev, 4603 "Failed to place MAC %6D onto remove list, err %s\n", 4604 addr, ":", ice_err_str(err)); 4605 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4606 goto free_filter_lists; 4607 } 4608 } 4609 } 4610 4611 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4612 4613 status = ice_add_mac(hw, &data.add_list); 4614 if (status) { 4615 device_printf(sc->dev, 4616 "Could not add new MAC filters, err %s aq_err %s\n", 4617 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 4618 err = (EIO); 4619 goto free_filter_lists; 4620 } 4621 4622 status = ice_remove_mac(hw, &remove_list); 4623 if (status) { 4624 device_printf(sc->dev, 4625 "Could not remove old MAC filters, err %s aq_err %s\n", 4626 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 4627 err = (EIO); 4628 goto free_filter_lists; 4629 } 4630 4631 free_filter_lists: 4632 ice_free_fltr_list(&data.add_list); 4633 ice_free_fltr_list(&remove_list); 4634 4635 return (err); 4636 } 4637 4638 /** 4639 * ice_add_vlan_hw_filter - Add a VLAN filter for a given VSI 4640 * @vsi: The VSI to add the filter for 4641 * @vid: VLAN to add 4642 * 4643 * Programs a HW filter so that the given VSI will receive the specified VLAN. 
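 *
 * A minimal usage sketch (the VLAN registration path that calls this lives
 * elsewhere in the driver; the vid value is illustrative):
 *
 *	if (ice_add_vlan_hw_filter(vsi, vid) != ICE_SUCCESS)
 *		device_printf(vsi->sc->dev, "failed to add VLAN filter\n");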
4644 */ 4645 enum ice_status 4646 ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) 4647 { 4648 struct ice_hw *hw = &vsi->sc->hw; 4649 struct ice_list_head vlan_list; 4650 struct ice_fltr_list_entry vlan_entry; 4651 4652 INIT_LIST_HEAD(&vlan_list); 4653 memset(&vlan_entry, 0, sizeof(vlan_entry)); 4654 4655 vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 4656 vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 4657 vlan_entry.fltr_info.flag = ICE_FLTR_TX; 4658 vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; 4659 vlan_entry.fltr_info.vsi_handle = vsi->idx; 4660 vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; 4661 4662 LIST_ADD(&vlan_entry.list_entry, &vlan_list); 4663 4664 return ice_add_vlan(hw, &vlan_list); 4665 } 4666 4667 /** 4668 * ice_remove_vlan_hw_filter - Remove a VLAN filter for a given VSI 4669 * @vsi: The VSI to add the filter for 4670 * @vid: VLAN to remove 4671 * 4672 * Removes a previously programmed HW filter for the specified VSI. 4673 */ 4674 enum ice_status 4675 ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) 4676 { 4677 struct ice_hw *hw = &vsi->sc->hw; 4678 struct ice_list_head vlan_list; 4679 struct ice_fltr_list_entry vlan_entry; 4680 4681 INIT_LIST_HEAD(&vlan_list); 4682 memset(&vlan_entry, 0, sizeof(vlan_entry)); 4683 4684 vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 4685 vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 4686 vlan_entry.fltr_info.flag = ICE_FLTR_TX; 4687 vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; 4688 vlan_entry.fltr_info.vsi_handle = vsi->idx; 4689 vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; 4690 4691 LIST_ADD(&vlan_entry.list_entry, &vlan_list); 4692 4693 return ice_remove_vlan(hw, &vlan_list); 4694 } 4695 4696 #define ICE_SYSCTL_HELP_RX_ITR \ 4697 "\nControl Rx interrupt throttle rate." \ 4698 "\n\t0-8160 - sets interrupt rate in usecs" \ 4699 "\n\t -1 - reset the Rx itr to default" 4700 4701 /** 4702 * ice_sysctl_rx_itr - Display or change the Rx ITR for a VSI 4703 * @oidp: sysctl oid structure 4704 * @arg1: pointer to private data structure 4705 * @arg2: unused 4706 * @req: sysctl request pointer 4707 * 4708 * On read: Displays the current Rx ITR value 4709 * on write: Sets the Rx ITR value, reconfiguring device if it is up 4710 */ 4711 static int 4712 ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS) 4713 { 4714 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4715 struct ice_softc *sc = vsi->sc; 4716 int increment, error = 0; 4717 4718 UNREFERENCED_PARAMETER(arg2); 4719 4720 if (ice_driver_is_detaching(sc)) 4721 return (ESHUTDOWN); 4722 4723 error = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req); 4724 if ((error) || (req->newptr == NULL)) 4725 return (error); 4726 4727 if (vsi->rx_itr < 0) 4728 vsi->rx_itr = ICE_DFLT_RX_ITR; 4729 if (vsi->rx_itr > ICE_ITR_MAX) 4730 vsi->rx_itr = ICE_ITR_MAX; 4731 4732 /* Assume 2usec increment if it hasn't been loaded yet */ 4733 increment = sc->hw.itr_gran ? : 2; 4734 4735 /* We need to round the value to the hardware's ITR granularity */ 4736 vsi->rx_itr = (vsi->rx_itr / increment ) * increment; 4737 4738 /* If the driver has finished initializing, then we need to reprogram 4739 * the ITR registers now. Otherwise, they will be programmed during 4740 * driver initialization. 4741 */ 4742 if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) 4743 ice_configure_rx_itr(vsi); 4744 4745 return (0); 4746 } 4747 4748 #define ICE_SYSCTL_HELP_TX_ITR \ 4749 "\nControl Tx interrupt throttle rate." 
\ 4750 "\n\t0-8160 - sets interrupt rate in usecs" \ 4751 "\n\t -1 - reset the Tx itr to default" 4752 4753 /** 4754 * ice_sysctl_tx_itr - Display or change the Tx ITR for a VSI 4755 * @oidp: sysctl oid structure 4756 * @arg1: pointer to private data structure 4757 * @arg2: unused 4758 * @req: sysctl request pointer 4759 * 4760 * On read: Displays the current Tx ITR value 4761 * on write: Sets the Tx ITR value, reconfiguring device if it is up 4762 */ 4763 static int 4764 ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS) 4765 { 4766 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4767 struct ice_softc *sc = vsi->sc; 4768 int increment, error = 0; 4769 4770 UNREFERENCED_PARAMETER(arg2); 4771 4772 if (ice_driver_is_detaching(sc)) 4773 return (ESHUTDOWN); 4774 4775 error = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req); 4776 if ((error) || (req->newptr == NULL)) 4777 return (error); 4778 4779 /* Allow configuring a negative value to reset to the default */ 4780 if (vsi->tx_itr < 0) 4781 vsi->tx_itr = ICE_DFLT_TX_ITR; 4782 if (vsi->tx_itr > ICE_ITR_MAX) 4783 vsi->tx_itr = ICE_ITR_MAX; 4784 4785 /* Assume 2usec increment if it hasn't been loaded yet */ 4786 increment = sc->hw.itr_gran ? : 2; 4787 4788 /* We need to round the value to the hardware's ITR granularity */ 4789 vsi->tx_itr = (vsi->tx_itr / increment ) * increment; 4790 4791 /* If the driver has finished initializing, then we need to reprogram 4792 * the ITR registers now. Otherwise, they will be programmed during 4793 * driver initialization. 4794 */ 4795 if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) 4796 ice_configure_tx_itr(vsi); 4797 4798 return (0); 4799 } 4800 4801 /** 4802 * ice_add_vsi_tunables - Add tunables and nodes for a VSI 4803 * @vsi: pointer to VSI structure 4804 * @parent: parent node to add the tunables under 4805 * 4806 * Create a sysctl context for the VSI, so that sysctls for the VSI can be 4807 * dynamically removed upon VSI removal. 4808 * 4809 * Add various tunables and set up the basic node structure for the VSI. Must 4810 * be called *prior* to ice_add_vsi_sysctls. It should be called as soon as 4811 * possible after the VSI memory is initialized. 4812 * 4813 * VSI specific sysctls with CTLFLAG_TUN should be initialized here so that 4814 * their values can be read from loader.conf prior to their first use in the 4815 * driver. 
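 *
 * For example (path illustrative, assuming unit 0 and the PF VSI at index 0),
 * a loader.conf(5) entry such as:
 *
 *	dev.ice.0.vsi.0.rx_itr="-1"
 *
 * is picked up by the CTLFLAG_RWTUN "rx_itr" sysctl added below.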
4816 */ 4817 void 4818 ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent) 4819 { 4820 struct sysctl_oid_list *vsi_list; 4821 char vsi_name[32], vsi_desc[32]; 4822 4823 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4824 4825 /* Initialize the sysctl context for this VSI */ 4826 sysctl_ctx_init(&vsi->ctx); 4827 4828 /* Add a node to collect this VSI's statistics together */ 4829 snprintf(vsi_name, sizeof(vsi_name), "%u", vsi->idx); 4830 snprintf(vsi_desc, sizeof(vsi_desc), "VSI %u", vsi->idx); 4831 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->ctx, parent_list, OID_AUTO, vsi_name, 4832 CTLFLAG_RD, NULL, vsi_desc); 4833 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 4834 4835 vsi->rx_itr = ICE_DFLT_RX_ITR; 4836 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "rx_itr", 4837 CTLTYPE_S16 | CTLFLAG_RWTUN, 4838 vsi, 0, ice_sysctl_rx_itr, "S", 4839 ICE_SYSCTL_HELP_RX_ITR); 4840 4841 vsi->tx_itr = ICE_DFLT_TX_ITR; 4842 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "tx_itr", 4843 CTLTYPE_S16 | CTLFLAG_RWTUN, 4844 vsi, 0, ice_sysctl_tx_itr, "S", 4845 ICE_SYSCTL_HELP_TX_ITR); 4846 } 4847 4848 /** 4849 * ice_del_vsi_sysctl_ctx - Delete the sysctl context(s) of a VSI 4850 * @vsi: the VSI to remove contexts for 4851 * 4852 * Free the context for the VSI sysctls. This includes the main context, as 4853 * well as the per-queue sysctls. 4854 */ 4855 void 4856 ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi) 4857 { 4858 device_t dev = vsi->sc->dev; 4859 int err; 4860 4861 if (vsi->vsi_node) { 4862 err = sysctl_ctx_free(&vsi->ctx); 4863 if (err) 4864 device_printf(dev, "failed to free VSI %d sysctl context, err %s\n", 4865 vsi->idx, ice_err_str(err)); 4866 vsi->vsi_node = NULL; 4867 } 4868 } 4869 4870 /** 4871 * ice_add_device_tunables - Add early tunable sysctls and sysctl nodes 4872 * @sc: device private structure 4873 * 4874 * Add per-device dynamic tunable sysctls, and set up the general sysctl trees 4875 * for re-use by ice_add_device_sysctls. 4876 * 4877 * In order for the sysctl fields to be initialized before use, this function 4878 * should be called as early as possible during attach activities. 4879 * 4880 * Any non-global sysctl marked as CTLFLAG_TUN should likely be initialized 4881 * here in this function, rather than later in ice_add_device_sysctls. 4882 * 4883 * To make things easier, this function is also expected to set up the various 4884 * sysctl nodes in addition to tunables so that other sysctls which can't be 4885 * initialized early can hook into the same nodes.
4900 */ 4901 4902 sc->vsi_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "vsi", 4903 CTLFLAG_RD, NULL, "VSI Configuration and Statistics"); 4904 4905 /* Add debug tunables */ 4906 ice_add_debug_tunables(sc); 4907 } 4908 4909 /** 4910 * ice_sysctl_dump_mac_filters - Dump a list of all HW MAC Filters 4911 * @oidp: sysctl oid structure 4912 * @arg1: pointer to private data structure 4913 * @arg2: unused 4914 * @req: sysctl request pointer 4915 * 4916 * Callback for "mac_filters" sysctl to dump the programmed MAC filters. 4917 */ 4918 static int 4919 ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS) 4920 { 4921 struct ice_softc *sc = (struct ice_softc *)arg1; 4922 struct ice_hw *hw = &sc->hw; 4923 struct ice_switch_info *sw = hw->switch_info; 4924 struct ice_fltr_mgmt_list_entry *fm_entry; 4925 struct ice_list_head *rule_head; 4926 struct ice_lock *rule_lock; 4927 struct ice_fltr_info *fi; 4928 struct sbuf *sbuf; 4929 int ret; 4930 4931 UNREFERENCED_PARAMETER(oidp); 4932 UNREFERENCED_PARAMETER(arg2); 4933 4934 if (ice_driver_is_detaching(sc)) 4935 return (ESHUTDOWN); 4936 4937 /* Wire the old buffer so we can take a non-sleepable lock */ 4938 ret = sysctl_wire_old_buffer(req, 0); 4939 if (ret) 4940 return (ret); 4941 4942 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4943 4944 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 4945 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4946 4947 sbuf_printf(sbuf, "MAC Filter List"); 4948 4949 ice_acquire_lock(rule_lock); 4950 4951 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 4952 fi = &fm_entry->fltr_info; 4953 4954 sbuf_printf(sbuf, 4955 "\nmac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %d", 4956 fi->l_data.mac.mac_addr, ":", fi->vsi_handle, 4957 ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, 4958 ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); 4959 4960 /* if we have a vsi_list_info, print some information about that */ 4961 if (fm_entry->vsi_list_info) { 4962 sbuf_printf(sbuf, 4963 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 4964 fm_entry->vsi_count, 4965 fm_entry->vsi_list_info->vsi_list_id, 4966 fm_entry->vsi_list_info->ref_cnt); 4967 } 4968 } 4969 4970 ice_release_lock(rule_lock); 4971 4972 sbuf_finish(sbuf); 4973 sbuf_delete(sbuf); 4974 4975 return (0); 4976 } 4977 4978 /** 4979 * ice_sysctl_dump_vlan_filters - Dump a list of all HW VLAN Filters 4980 * @oidp: sysctl oid structure 4981 * @arg1: pointer to private data structure 4982 * @arg2: unused 4983 * @req: sysctl request pointer 4984 * 4985 * Callback for "vlan_filters" sysctl to dump the programmed VLAN filters. 
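 *
 * The output format mirrors ice_sysctl_dump_mac_filters() above: one line per
 * rule, with filter flags decoded by ice_fltr_flag_str() and the forwarding
 * action decoded by ice_fwd_act_str().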
4986 */ 4987 static int 4988 ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS) 4989 { 4990 struct ice_softc *sc = (struct ice_softc *)arg1; 4991 struct ice_hw *hw = &sc->hw; 4992 struct ice_switch_info *sw = hw->switch_info; 4993 struct ice_fltr_mgmt_list_entry *fm_entry; 4994 struct ice_list_head *rule_head; 4995 struct ice_lock *rule_lock; 4996 struct ice_fltr_info *fi; 4997 struct sbuf *sbuf; 4998 int ret; 4999 5000 UNREFERENCED_PARAMETER(oidp); 5001 UNREFERENCED_PARAMETER(arg2); 5002 5003 if (ice_driver_is_detaching(sc)) 5004 return (ESHUTDOWN); 5005 5006 /* Wire the old buffer so we can take a non-sleepable lock */ 5007 ret = sysctl_wire_old_buffer(req, 0); 5008 if (ret) 5009 return (ret); 5010 5011 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5012 5013 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; 5014 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; 5015 5016 sbuf_printf(sbuf, "VLAN Filter List"); 5017 5018 ice_acquire_lock(rule_lock); 5019 5020 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 5021 fi = &fm_entry->fltr_info; 5022 5023 sbuf_printf(sbuf, 5024 "\nvlan_id = %4d, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 5025 fi->l_data.vlan.vlan_id, fi->vsi_handle, 5026 ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, 5027 ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); 5028 5029 /* if we have a vsi_list_info, print some information about that */ 5030 if (fm_entry->vsi_list_info) { 5031 sbuf_printf(sbuf, 5032 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 5033 fm_entry->vsi_count, 5034 fm_entry->vsi_list_info->vsi_list_id, 5035 fm_entry->vsi_list_info->ref_cnt); 5036 } 5037 } 5038 5039 ice_release_lock(rule_lock); 5040 5041 sbuf_finish(sbuf); 5042 sbuf_delete(sbuf); 5043 5044 return (0); 5045 } 5046 5047 /** 5048 * ice_sysctl_dump_ethertype_filters - Dump a list of all HW Ethertype filters 5049 * @oidp: sysctl oid structure 5050 * @arg1: pointer to private data structure 5051 * @arg2: unused 5052 * @req: sysctl request pointer 5053 * 5054 * Callback for "ethertype_filters" sysctl to dump the programmed Ethertype 5055 * filters. 
5056 */ 5057 static int 5058 ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS) 5059 { 5060 struct ice_softc *sc = (struct ice_softc *)arg1; 5061 struct ice_hw *hw = &sc->hw; 5062 struct ice_switch_info *sw = hw->switch_info; 5063 struct ice_fltr_mgmt_list_entry *fm_entry; 5064 struct ice_list_head *rule_head; 5065 struct ice_lock *rule_lock; 5066 struct ice_fltr_info *fi; 5067 struct sbuf *sbuf; 5068 int ret; 5069 5070 UNREFERENCED_PARAMETER(oidp); 5071 UNREFERENCED_PARAMETER(arg2); 5072 5073 if (ice_driver_is_detaching(sc)) 5074 return (ESHUTDOWN); 5075 5076 /* Wire the old buffer so we can take a non-sleepable lock */ 5077 ret = sysctl_wire_old_buffer(req, 0); 5078 if (ret) 5079 return (ret); 5080 5081 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5082 5083 rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rule_lock; 5084 rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rules; 5085 5086 sbuf_printf(sbuf, "Ethertype Filter List"); 5087 5088 ice_acquire_lock(rule_lock); 5089 5090 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 5091 fi = &fm_entry->fltr_info; 5092 5093 sbuf_printf(sbuf, 5094 "\nethertype = 0x%04x, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 5095 fi->l_data.ethertype_mac.ethertype, 5096 fi->vsi_handle, ice_fltr_flag_str(fi->flag), 5097 fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), 5098 fi->fltr_rule_id); 5099 5100 /* if we have a vsi_list_info, print some information about that */ 5101 if (fm_entry->vsi_list_info) { 5102 sbuf_printf(sbuf, 5103 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 5104 fm_entry->vsi_count, 5105 fm_entry->vsi_list_info->vsi_list_id, 5106 fm_entry->vsi_list_info->ref_cnt); 5107 } 5108 } 5109 5110 ice_release_lock(rule_lock); 5111 5112 sbuf_finish(sbuf); 5113 sbuf_delete(sbuf); 5114 5115 return (0); 5116 } 5117 5118 /** 5119 * ice_sysctl_dump_ethertype_mac_filters - Dump a list of all HW Ethertype/MAC filters 5120 * @oidp: sysctl oid structure 5121 * @arg1: pointer to private data structure 5122 * @arg2: unused 5123 * @req: sysctl request pointer 5124 * 5125 * Callback for "ethertype_mac_filters" sysctl to dump the programmed 5126 * Ethertype/MAC filters. 
5127 */ 5128 static int 5129 ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS) 5130 { 5131 struct ice_softc *sc = (struct ice_softc *)arg1; 5132 struct ice_hw *hw = &sc->hw; 5133 struct ice_switch_info *sw = hw->switch_info; 5134 struct ice_fltr_mgmt_list_entry *fm_entry; 5135 struct ice_list_head *rule_head; 5136 struct ice_lock *rule_lock; 5137 struct ice_fltr_info *fi; 5138 struct sbuf *sbuf; 5139 int ret; 5140 5141 UNREFERENCED_PARAMETER(oidp); 5142 UNREFERENCED_PARAMETER(arg2); 5143 5144 if (ice_driver_is_detaching(sc)) 5145 return (ESHUTDOWN); 5146 5147 /* Wire the old buffer so we can take a non-sleepable lock */ 5148 ret = sysctl_wire_old_buffer(req, 0); 5149 if (ret) 5150 return (ret); 5151 5152 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5153 5154 rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rule_lock; 5155 rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rules; 5156 5157 sbuf_printf(sbuf, "Ethertype/MAC Filter List"); 5158 5159 ice_acquire_lock(rule_lock); 5160 5161 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 5162 fi = &fm_entry->fltr_info; 5163 5164 sbuf_printf(sbuf, 5165 "\nethertype = 0x%04x, mac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 5166 fi->l_data.ethertype_mac.ethertype, 5167 fi->l_data.ethertype_mac.mac_addr, ":", 5168 fi->vsi_handle, ice_fltr_flag_str(fi->flag), 5169 fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), 5170 fi->fltr_rule_id); 5171 5172 /* if we have a vsi_list_info, print some information about that */ 5173 if (fm_entry->vsi_list_info) { 5174 sbuf_printf(sbuf, 5175 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 5176 fm_entry->vsi_count, 5177 fm_entry->vsi_list_info->vsi_list_id, 5178 fm_entry->vsi_list_info->ref_cnt); 5179 } 5180 } 5181 5182 ice_release_lock(rule_lock); 5183 5184 sbuf_finish(sbuf); 5185 sbuf_delete(sbuf); 5186 5187 return (0); 5188 } 5189 5190 /** 5191 * ice_sysctl_dump_state_flags - Dump device driver state flags 5192 * @oidp: sysctl oid structure 5193 * @arg1: pointer to private data structure 5194 * @arg2: unused 5195 * @req: sysctl request pointer 5196 * 5197 * Callback for "state" sysctl to display currently set driver state flags. 
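 *
 * The handler is attached as the "state" sysctl under the per-device debug
 * node by ice_add_debug_sysctls() below; on kernels where the debug node is
 * visible it can be read with, e.g., sysctl dev.ice.0.debug.state (unit
 * number illustrative).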
5198 */ 5199 static int 5200 ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS) 5201 { 5202 struct ice_softc *sc = (struct ice_softc *)arg1; 5203 struct sbuf *sbuf; 5204 u32 copied_state; 5205 unsigned int i; 5206 bool at_least_one = false; 5207 5208 UNREFERENCED_PARAMETER(oidp); 5209 UNREFERENCED_PARAMETER(arg2); 5210 5211 if (ice_driver_is_detaching(sc)) 5212 return (ESHUTDOWN); 5213 5214 /* Make a copy of the state to ensure we display coherent values */ 5215 copied_state = atomic_load_acq_32(&sc->state); 5216 5217 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5218 5219 /* Add the string for each set state to the sbuf */ 5220 for (i = 0; i < 32; i++) { 5221 if (copied_state & BIT(i)) { 5222 const char *str = ice_state_to_str((enum ice_state)i); 5223 5224 at_least_one = true; 5225 5226 if (str) 5227 sbuf_printf(sbuf, "\n%s", str); 5228 else 5229 sbuf_printf(sbuf, "\nBIT(%u)", i); 5230 } 5231 } 5232 5233 if (!at_least_one) 5234 sbuf_printf(sbuf, "Nothing set"); 5235 5236 sbuf_finish(sbuf); 5237 sbuf_delete(sbuf); 5238 5239 return (0); 5240 } 5241 5242 /** 5243 * ice_add_debug_tunables - Add tunables helpful for debugging the device driver 5244 * @sc: device private structure 5245 * 5246 * Add sysctl tunable values related to debugging the device driver. For now, 5247 * this means a tunable to set the debug mask early during driver load. 5248 * 5249 * The debug node will be marked CTLFLAG_SKIP unless INVARIANTS is defined, so 5250 * that in normal kernel builds, these will all be hidden, but on a debug 5251 * kernel they will be more easily visible. 5252 */ 5253 static void 5254 ice_add_debug_tunables(struct ice_softc *sc) 5255 { 5256 struct sysctl_oid_list *debug_list; 5257 device_t dev = sc->dev; 5258 5259 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 5260 struct sysctl_oid_list *ctx_list = 5261 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 5262 5263 sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", 5264 ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 5265 NULL, "Debug Sysctls"); 5266 debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); 5267 5268 SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "debug_mask", 5269 CTLFLAG_RW | CTLFLAG_TUN, &sc->hw.debug_mask, 0, 5270 "Debug message enable/disable mask"); 5271 5272 /* Load the default value from the global sysctl first */ 5273 sc->enable_tx_fc_filter = ice_enable_tx_fc_filter; 5274 5275 SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_fc_filter", 5276 CTLFLAG_RDTUN, &sc->enable_tx_fc_filter, 0, 5277 "Drop Ethertype 0x8808 control frames originating from software on this PF"); 5278 5279 /* Load the default value from the global sysctl first */ 5280 sc->enable_tx_lldp_filter = ice_enable_tx_lldp_filter; 5281 5282 SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_lldp_filter", 5283 CTLFLAG_RDTUN, &sc->enable_tx_lldp_filter, 0, 5284 "Drop Ethertype 0x88cc LLDP frames originating from software on this PF"); 5285 5286 } 5287 5288 #define ICE_SYSCTL_HELP_REQUEST_RESET \ 5289 "\nRequest the driver to initiate a reset." 
\ 5290 "\n\tpfr - Initiate a PF reset" \ 5291 "\n\tcorer - Initiate a CORE reset" \ 5292 "\n\tglobr - Initiate a GLOBAL reset" 5293 5294 /** 5295 * @var rl_sysctl_ticks 5296 * @brief timestamp for latest reset request sysctl call 5297 * 5298 * Helps rate-limit the call to the sysctl which resets the device 5299 */ 5300 int rl_sysctl_ticks = 0; 5301 5302 /** 5303 * ice_sysctl_request_reset - Request that the driver initiate a reset 5304 * @oidp: sysctl oid structure 5305 * @arg1: pointer to private data structure 5306 * @arg2: unused 5307 * @req: sysctl request pointer 5308 * 5309 * Callback for "request_reset" sysctl to request that the driver initiate 5310 * a reset. Expects to be passed one of the following strings 5311 * 5312 * "pfr" - Initiate a PF reset 5313 * "corer" - Initiate a CORE reset 5314 * "globr" - Initiate a Global reset 5315 */ 5316 static int 5317 ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS) 5318 { 5319 struct ice_softc *sc = (struct ice_softc *)arg1; 5320 struct ice_hw *hw = &sc->hw; 5321 enum ice_status status; 5322 enum ice_reset_req reset_type = ICE_RESET_INVAL; 5323 const char *reset_message; 5324 int error = 0; 5325 5326 /* Buffer to store the requested reset string. Must contain enough 5327 * space to store the largest expected reset string, which currently 5328 * means 6 bytes of space. 5329 */ 5330 char reset[6] = ""; 5331 5332 UNREFERENCED_PARAMETER(arg2); 5333 5334 error = priv_check(curthread, PRIV_DRIVER); 5335 if (error) 5336 return (error); 5337 5338 if (ice_driver_is_detaching(sc)) 5339 return (ESHUTDOWN); 5340 5341 /* Read in the requested reset type. */ 5342 error = sysctl_handle_string(oidp, reset, sizeof(reset), req); 5343 if ((error) || (req->newptr == NULL)) 5344 return (error); 5345 5346 if (strcmp(reset, "pfr") == 0) { 5347 reset_message = "Requesting a PF reset"; 5348 reset_type = ICE_RESET_PFR; 5349 } else if (strcmp(reset, "corer") == 0) { 5350 reset_message = "Initiating a CORE reset"; 5351 reset_type = ICE_RESET_CORER; 5352 } else if (strcmp(reset, "globr") == 0) { 5353 reset_message = "Initiating a GLOBAL reset"; 5354 reset_type = ICE_RESET_GLOBR; 5355 } else if (strcmp(reset, "empr") == 0) { 5356 device_printf(sc->dev, "Triggering an EMP reset via software is not currently supported\n"); 5357 return (EOPNOTSUPP); 5358 } 5359 5360 if (reset_type == ICE_RESET_INVAL) { 5361 device_printf(sc->dev, "%s is not a valid reset request\n", reset); 5362 return (EINVAL); 5363 } 5364 5365 /* 5366 * Rate-limit the frequency at which this function is called. 5367 * Assuming this is called successfully once, typically, 5368 * everything should be handled within the allotted time frame. 5369 * However, in the odd setup situations, we've also put in 5370 * guards for when the reset has finished, but we're in the 5371 * process of rebuilding. And instead of queueing an intent, 5372 * simply error out and let the caller retry, if so desired. 5373 */ 5374 if (TICKS_2_MSEC(ticks - rl_sysctl_ticks) < 500) { 5375 device_printf(sc->dev, 5376 "Call frequency too high. Operation aborted.\n"); 5377 return (EBUSY); 5378 } 5379 rl_sysctl_ticks = ticks; 5380 5381 if (TICKS_2_MSEC(ticks - sc->rebuild_ticks) < 100) { 5382 device_printf(sc->dev, "Device rebuilding. Operation aborted.\n"); 5383 return (EBUSY); 5384 } 5385 5386 if (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) { 5387 device_printf(sc->dev, "Device in reset. 
Operation aborted.\n"); 5388 return (EBUSY); 5389 } 5390 5391 device_printf(sc->dev, "%s\n", reset_message); 5392 5393 /* Initiate the PF reset during the admin status task */ 5394 if (reset_type == ICE_RESET_PFR) { 5395 ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ); 5396 return (0); 5397 } 5398 5399 /* 5400 * Other types of resets including CORE and GLOBAL resets trigger an 5401 * interrupt on all PFs. Initiate the reset now. Preparation and 5402 * rebuild logic will be handled by the admin status task. 5403 */ 5404 status = ice_reset(hw, reset_type); 5405 5406 /* 5407 * Resets can take a long time and we still don't want another call 5408 * to this function before we settle down. 5409 */ 5410 rl_sysctl_ticks = ticks; 5411 5412 if (status) { 5413 device_printf(sc->dev, "failed to initiate device reset, err %s\n", 5414 ice_status_str(status)); 5415 ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); 5416 return (EFAULT); 5417 } 5418 5419 return (0); 5420 } 5421 5422 /** 5423 * ice_add_debug_sysctls - Add sysctls helpful for debugging the device driver 5424 * @sc: device private structure 5425 * 5426 * Add sysctls related to debugging the device driver. Generally these should 5427 * simply be sysctls which dump internal driver state, to aid in understanding 5428 * what the driver is doing. 5429 */ 5430 static void 5431 ice_add_debug_sysctls(struct ice_softc *sc) 5432 { 5433 struct sysctl_oid *sw_node; 5434 struct sysctl_oid_list *debug_list, *sw_list; 5435 device_t dev = sc->dev; 5436 5437 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 5438 5439 debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); 5440 5441 SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "request_reset", 5442 CTLTYPE_STRING | CTLFLAG_WR, sc, 0, 5443 ice_sysctl_request_reset, "A", 5444 ICE_SYSCTL_HELP_REQUEST_RESET); 5445 5446 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "pfr_count", CTLFLAG_RD, 5447 &sc->soft_stats.pfr_count, 0, "# of PF resets handled"); 5448 5449 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "corer_count", CTLFLAG_RD, 5450 &sc->soft_stats.corer_count, 0, "# of CORE resets handled"); 5451 5452 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "globr_count", CTLFLAG_RD, 5453 &sc->soft_stats.globr_count, 0, "# of Global resets handled"); 5454 5455 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "empr_count", CTLFLAG_RD, 5456 &sc->soft_stats.empr_count, 0, "# of EMP resets handled"); 5457 5458 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "tx_mdd_count", CTLFLAG_RD, 5459 &sc->soft_stats.tx_mdd_count, 0, "# of Tx MDD events detected"); 5460 5461 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "rx_mdd_count", CTLFLAG_RD, 5462 &sc->soft_stats.rx_mdd_count, 0, "# of Rx MDD events detected"); 5463 5464 SYSCTL_ADD_PROC(ctx, debug_list, 5465 OID_AUTO, "state", CTLTYPE_STRING | CTLFLAG_RD, 5466 sc, 0, ice_sysctl_dump_state_flags, "A", "Driver State Flags"); 5467 5468 SYSCTL_ADD_PROC(ctx, debug_list, 5469 OID_AUTO, "phy_type_low", CTLTYPE_U64 | CTLFLAG_RW, 5470 sc, 0, ice_sysctl_phy_type_low, "QU", 5471 "PHY type Low from Get PHY Caps/Set PHY Cfg"); 5472 5473 SYSCTL_ADD_PROC(ctx, debug_list, 5474 OID_AUTO, "phy_type_high", CTLTYPE_U64 | CTLFLAG_RW, 5475 sc, 0, ice_sysctl_phy_type_high, "QU", 5476 "PHY type High from Get PHY Caps/Set PHY Cfg"); 5477 5478 SYSCTL_ADD_PROC(ctx, debug_list, 5479 OID_AUTO, "phy_sw_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5480 sc, 0, ice_sysctl_phy_sw_caps, "", 5481 "Get PHY Capabilities (Software configuration)"); 5482 5483 SYSCTL_ADD_PROC(ctx, debug_list, 5484 OID_AUTO, "phy_nvm_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5485 
sc, 0, ice_sysctl_phy_nvm_caps, "", 5486 "Get PHY Capabilities (NVM configuration)"); 5487 5488 SYSCTL_ADD_PROC(ctx, debug_list, 5489 OID_AUTO, "phy_topo_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5490 sc, 0, ice_sysctl_phy_topo_caps, "", 5491 "Get PHY Capabilities (Topology configuration)"); 5492 5493 SYSCTL_ADD_PROC(ctx, debug_list, 5494 OID_AUTO, "phy_link_status", CTLTYPE_STRUCT | CTLFLAG_RD, 5495 sc, 0, ice_sysctl_phy_link_status, "", 5496 "Get PHY Link Status"); 5497 5498 SYSCTL_ADD_PROC(ctx, debug_list, 5499 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD, 5500 sc, 0, ice_sysctl_read_i2c_diag_data, "A", 5501 "Dump selected diagnostic data from FW"); 5502 5503 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "fw_build", CTLFLAG_RD, 5504 &sc->hw.fw_build, 0, "FW Build ID"); 5505 5506 SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "os_ddp_version", CTLTYPE_STRING | CTLFLAG_RD, 5507 sc, 0, ice_sysctl_os_pkg_version, "A", 5508 "DDP package name and version found in ice_ddp"); 5509 5510 SYSCTL_ADD_PROC(ctx, debug_list, 5511 OID_AUTO, "cur_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, 5512 sc, 0, ice_sysctl_fw_cur_lldp_persist_status, "A", "Current LLDP persistent status"); 5513 5514 SYSCTL_ADD_PROC(ctx, debug_list, 5515 OID_AUTO, "dflt_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, 5516 sc, 0, ice_sysctl_fw_dflt_lldp_persist_status, "A", "Default LLDP persistent status"); 5517 5518 SYSCTL_ADD_PROC(ctx, debug_list, 5519 OID_AUTO, "negotiated_fc", CTLTYPE_STRING | CTLFLAG_RD, 5520 sc, 0, ice_sysctl_negotiated_fc, "A", "Current Negotiated Flow Control mode"); 5521 5522 sw_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "switch", 5523 CTLFLAG_RD, NULL, "Switch Configuration"); 5524 sw_list = SYSCTL_CHILDREN(sw_node); 5525 5526 SYSCTL_ADD_PROC(ctx, sw_list, 5527 OID_AUTO, "mac_filters", CTLTYPE_STRING | CTLFLAG_RD, 5528 sc, 0, ice_sysctl_dump_mac_filters, "A", "MAC Filters"); 5529 5530 SYSCTL_ADD_PROC(ctx, sw_list, 5531 OID_AUTO, "vlan_filters", CTLTYPE_STRING | CTLFLAG_RD, 5532 sc, 0, ice_sysctl_dump_vlan_filters, "A", "VLAN Filters"); 5533 5534 SYSCTL_ADD_PROC(ctx, sw_list, 5535 OID_AUTO, "ethertype_filters", CTLTYPE_STRING | CTLFLAG_RD, 5536 sc, 0, ice_sysctl_dump_ethertype_filters, "A", "Ethertype Filters"); 5537 5538 SYSCTL_ADD_PROC(ctx, sw_list, 5539 OID_AUTO, "ethertype_mac_filters", CTLTYPE_STRING | CTLFLAG_RD, 5540 sc, 0, ice_sysctl_dump_ethertype_mac_filters, "A", "Ethertype/MAC Filters"); 5541 5542 } 5543 5544 /** 5545 * ice_vsi_disable_tx - Disable (unconfigure) Tx queues for a VSI 5546 * @vsi: the VSI to disable 5547 * 5548 * Disables the Tx queues associated with this VSI. Essentially the opposite 5549 * of ice_cfg_vsi_for_tx. 
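 *
 * Returns 0 on success, ENOSYS if more than 255 Tx queues are configured,
 * ENOMEM if a temporary queue array cannot be allocated, or ENODEV if the
 * firmware request to disable the queues fails; queues that are already
 * disabled or a reset in progress are not treated as errors.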
5550 */ 5551 int 5552 ice_vsi_disable_tx(struct ice_vsi *vsi) 5553 { 5554 struct ice_softc *sc = vsi->sc; 5555 struct ice_hw *hw = &sc->hw; 5556 enum ice_status status; 5557 u32 *q_teids; 5558 u16 *q_ids, *q_handles; 5559 int i, err = 0; 5560 5561 if (vsi->num_tx_queues > 255) 5562 return (ENOSYS); 5563 5564 q_teids = (u32 *)malloc(sizeof(*q_teids) * vsi->num_tx_queues, 5565 M_ICE, M_NOWAIT|M_ZERO); 5566 if (!q_teids) 5567 return (ENOMEM); 5568 5569 q_ids = (u16 *)malloc(sizeof(*q_ids) * vsi->num_tx_queues, 5570 M_ICE, M_NOWAIT|M_ZERO); 5571 if (!q_ids) { 5572 err = (ENOMEM); 5573 goto free_q_teids; 5574 } 5575 5576 q_handles = (u16 *)malloc(sizeof(*q_handles) * vsi->num_tx_queues, 5577 M_ICE, M_NOWAIT|M_ZERO); 5578 if (!q_handles) { 5579 err = (ENOMEM); 5580 goto free_q_ids; 5581 } 5582 5583 5584 for (i = 0; i < vsi->num_tx_queues; i++) { 5585 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 5586 5587 q_ids[i] = vsi->tx_qmap[i]; 5588 q_handles[i] = i; 5589 q_teids[i] = txq->q_teid; 5590 } 5591 5592 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, vsi->num_tx_queues, 5593 q_handles, q_ids, q_teids, ICE_NO_RESET, 0, NULL); 5594 if (status == ICE_ERR_DOES_NOT_EXIST) { 5595 ; /* Queues have already been disabled, no need to report this as an error */ 5596 } else if (status == ICE_ERR_RESET_ONGOING) { 5597 device_printf(sc->dev, 5598 "Reset in progress. LAN Tx queues already disabled\n"); 5599 } else if (status) { 5600 device_printf(sc->dev, 5601 "Failed to disable LAN Tx queues: err %s aq_err %s\n", 5602 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5603 err = (ENODEV); 5604 } 5605 5606 /* free_q_handles: */ 5607 free(q_handles, M_ICE); 5608 free_q_ids: 5609 free(q_ids, M_ICE); 5610 free_q_teids: 5611 free(q_teids, M_ICE); 5612 5613 return err; 5614 } 5615 5616 /** 5617 * ice_vsi_set_rss_params - Set the RSS parameters for the VSI 5618 * @vsi: the VSI to configure 5619 * 5620 * Sets the RSS table size and lookup table type for the VSI based on its 5621 * VSI type. 5622 */ 5623 static void 5624 ice_vsi_set_rss_params(struct ice_vsi *vsi) 5625 { 5626 struct ice_softc *sc = vsi->sc; 5627 struct ice_hw_common_caps *cap; 5628 5629 cap = &sc->hw.func_caps.common_cap; 5630 5631 switch (vsi->type) { 5632 case ICE_VSI_PF: 5633 /* The PF VSI inherits RSS instance of the PF */ 5634 vsi->rss_table_size = cap->rss_table_size; 5635 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 5636 break; 5637 case ICE_VSI_VF: 5638 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; 5639 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; 5640 break; 5641 default: 5642 device_printf(sc->dev, 5643 "VSI %d: RSS not supported for VSI type %d\n", 5644 vsi->idx, vsi->type); 5645 break; 5646 } 5647 } 5648 5649 /** 5650 * ice_vsi_add_txqs_ctx - Create a sysctl context and node to store txq sysctls 5651 * @vsi: The VSI to add the context for 5652 * 5653 * Creates a sysctl context for storing txq sysctls. Additionally creates 5654 * a node rooted at the given VSI's main sysctl node. This context will be 5655 * used to store per-txq sysctls which may need to be released during the 5656 * driver's lifetime. 
5657 */ 5658 void 5659 ice_vsi_add_txqs_ctx(struct ice_vsi *vsi) 5660 { 5661 struct sysctl_oid_list *vsi_list; 5662 5663 sysctl_ctx_init(&vsi->txqs_ctx); 5664 5665 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 5666 5667 vsi->txqs_node = SYSCTL_ADD_NODE(&vsi->txqs_ctx, vsi_list, OID_AUTO, "txqs", 5668 CTLFLAG_RD, NULL, "Tx Queues"); 5669 } 5670 5671 /** 5672 * ice_vsi_add_rxqs_ctx - Create a sysctl context and node to store rxq sysctls 5673 * @vsi: The VSI to add the context for 5674 * 5675 * Creates a sysctl context for storing rxq sysctls. Additionally creates 5676 * a node rooted at the given VSI's main sysctl node. This context will be 5677 * used to store per-rxq sysctls which may need to be released during the 5678 * driver's lifetime. 5679 */ 5680 void 5681 ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi) 5682 { 5683 struct sysctl_oid_list *vsi_list; 5684 5685 sysctl_ctx_init(&vsi->rxqs_ctx); 5686 5687 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 5688 5689 vsi->rxqs_node = SYSCTL_ADD_NODE(&vsi->rxqs_ctx, vsi_list, OID_AUTO, "rxqs", 5690 CTLFLAG_RD, NULL, "Rx Queues"); 5691 } 5692 5693 /** 5694 * ice_vsi_del_txqs_ctx - Delete the Tx queue sysctl context for this VSI 5695 * @vsi: The VSI to delete from 5696 * 5697 * Frees the txq sysctl context created for storing the per-queue Tx sysctls. 5698 * Must be called prior to freeing the Tx queue memory, in order to avoid 5699 * having sysctls point at stale memory. 5700 */ 5701 void 5702 ice_vsi_del_txqs_ctx(struct ice_vsi *vsi) 5703 { 5704 device_t dev = vsi->sc->dev; 5705 int err; 5706 5707 if (vsi->txqs_node) { 5708 err = sysctl_ctx_free(&vsi->txqs_ctx); 5709 if (err) 5710 device_printf(dev, "failed to free VSI %d txqs_ctx, err %s\n", 5711 vsi->idx, ice_err_str(err)); 5712 vsi->txqs_node = NULL; 5713 } 5714 } 5715 5716 /** 5717 * ice_vsi_del_rxqs_ctx - Delete the Rx queue sysctl context for this VSI 5718 * @vsi: The VSI to delete from 5719 * 5720 * Frees the rxq sysctl context created for storing the per-queue Rx sysctls. 5721 * Must be called prior to freeing the Rx queue memory, in order to avoid 5722 * having sysctls point at stale memory. 5723 */ 5724 void 5725 ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi) 5726 { 5727 device_t dev = vsi->sc->dev; 5728 int err; 5729 5730 if (vsi->rxqs_node) { 5731 err = sysctl_ctx_free(&vsi->rxqs_ctx); 5732 if (err) 5733 device_printf(dev, "failed to free VSI %d rxqs_ctx, err %s\n", 5734 vsi->idx, ice_err_str(err)); 5735 vsi->rxqs_node = NULL; 5736 } 5737 } 5738 5739 /** 5740 * ice_add_txq_sysctls - Add per-queue sysctls for a Tx queue 5741 * @txq: pointer to the Tx queue 5742 * 5743 * Add per-queue sysctls for a given Tx queue. Can't be called during 5744 * ice_add_vsi_sysctls, since the queue memory has not yet been setup. 
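 *
 * The statistics added here appear under the per-VSI "txqs" node created by
 * ice_vsi_add_txqs_ctx(), roughly as follows (everything other than "txqs"
 * and the statistic name is a placeholder):
 *
 *	dev.ice.<unit>.<vsi node>.txqs.<queue>.tx_packets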
5745 */ 5746 void 5747 ice_add_txq_sysctls(struct ice_tx_queue *txq) 5748 { 5749 struct ice_vsi *vsi = txq->vsi; 5750 struct sysctl_ctx_list *ctx = &vsi->txqs_ctx; 5751 struct sysctl_oid_list *txqs_list, *this_txq_list; 5752 struct sysctl_oid *txq_node; 5753 char txq_name[32], txq_desc[32]; 5754 5755 const struct ice_sysctl_info ctls[] = { 5756 { &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" }, 5757 { &txq->stats.tx_bytes, "tx_bytes", "Queue Bytes Transmitted" }, 5758 { &txq->stats.mss_too_small, "mss_too_small", "TSO sends with an MSS less than 64" }, 5759 { 0, 0, 0 } 5760 }; 5761 5762 const struct ice_sysctl_info *entry = ctls; 5763 5764 txqs_list = SYSCTL_CHILDREN(vsi->txqs_node); 5765 5766 snprintf(txq_name, sizeof(txq_name), "%u", txq->me); 5767 snprintf(txq_desc, sizeof(txq_desc), "Tx Queue %u", txq->me); 5768 txq_node = SYSCTL_ADD_NODE(ctx, txqs_list, OID_AUTO, txq_name, 5769 CTLFLAG_RD, NULL, txq_desc); 5770 this_txq_list = SYSCTL_CHILDREN(txq_node); 5771 5772 /* Add the Tx queue statistics */ 5773 while (entry->stat != 0) { 5774 SYSCTL_ADD_U64(ctx, this_txq_list, OID_AUTO, entry->name, 5775 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 5776 entry->description); 5777 entry++; 5778 } 5779 } 5780 5781 /** 5782 * ice_add_rxq_sysctls - Add per-queue sysctls for an Rx queue 5783 * @rxq: pointer to the Rx queue 5784 * 5785 * Add per-queue sysctls for a given Rx queue. Can't be called during 5786 * ice_add_vsi_sysctls, since the queue memory has not yet been setup. 5787 */ 5788 void 5789 ice_add_rxq_sysctls(struct ice_rx_queue *rxq) 5790 { 5791 struct ice_vsi *vsi = rxq->vsi; 5792 struct sysctl_ctx_list *ctx = &vsi->rxqs_ctx; 5793 struct sysctl_oid_list *rxqs_list, *this_rxq_list; 5794 struct sysctl_oid *rxq_node; 5795 char rxq_name[32], rxq_desc[32]; 5796 5797 const struct ice_sysctl_info ctls[] = { 5798 { &rxq->stats.rx_packets, "rx_packets", "Queue Packets Received" }, 5799 { &rxq->stats.rx_bytes, "rx_bytes", "Queue Bytes Received" }, 5800 { &rxq->stats.desc_errs, "rx_desc_errs", "Queue Rx Descriptor Errors" }, 5801 { 0, 0, 0 } 5802 }; 5803 5804 const struct ice_sysctl_info *entry = ctls; 5805 5806 rxqs_list = SYSCTL_CHILDREN(vsi->rxqs_node); 5807 5808 snprintf(rxq_name, sizeof(rxq_name), "%u", rxq->me); 5809 snprintf(rxq_desc, sizeof(rxq_desc), "Rx Queue %u", rxq->me); 5810 rxq_node = SYSCTL_ADD_NODE(ctx, rxqs_list, OID_AUTO, rxq_name, 5811 CTLFLAG_RD, NULL, rxq_desc); 5812 this_rxq_list = SYSCTL_CHILDREN(rxq_node); 5813 5814 /* Add the Rx queue statistics */ 5815 while (entry->stat != 0) { 5816 SYSCTL_ADD_U64(ctx, this_rxq_list, OID_AUTO, entry->name, 5817 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 5818 entry->description); 5819 entry++; 5820 } 5821 } 5822 5823 /** 5824 * ice_get_default_rss_key - Obtain a default RSS key 5825 * @seed: storage for the RSS key data 5826 * 5827 * Copies a pre-generated RSS key into the seed memory. The seed pointer must 5828 * point to a block of memory that is at least 40 bytes in size. 5829 * 5830 * The key isn't randomly generated each time this function is called because 5831 * that makes the RSS key change every time we reconfigure RSS. This does mean 5832 * that we're hard coding a possibly 'well known' key. We might want to 5833 * investigate randomly generating this key once during the first call. 
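 *
 * A minimal sketch of that idea (hypothetical, not what the driver does
 * today), generating the key once with arc4random_buf(9) and reusing it:
 *
 *	static u8 rand_seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
 *	static bool rand_seed_valid = false;
 *
 *	if (!rand_seed_valid) {
 *		arc4random_buf(rand_seed, sizeof(rand_seed));
 *		rand_seed_valid = true;
 *	}
 *	bcopy(rand_seed, seed, sizeof(rand_seed));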
5834 */ 5835 static void 5836 ice_get_default_rss_key(u8 *seed) 5837 { 5838 const u8 default_seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE] = { 5839 0x39, 0xed, 0xff, 0x4d, 0x43, 0x58, 0x42, 0xc3, 0x5f, 0xb8, 5840 0xa5, 0x32, 0x95, 0x65, 0x81, 0xcd, 0x36, 0x79, 0x71, 0x97, 5841 0xde, 0xa4, 0x41, 0x40, 0x6f, 0x27, 0xe9, 0x81, 0x13, 0xa0, 5842 0x95, 0x93, 0x5b, 0x1e, 0x9d, 0x27, 0x9d, 0x24, 0x84, 0xb5, 5843 }; 5844 5845 bcopy(default_seed, seed, ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); 5846 } 5847 5848 /** 5849 * ice_set_rss_key - Configure a given VSI with the default RSS key 5850 * @vsi: the VSI to configure 5851 * 5852 * Program the hardware RSS key. We use rss_getkey to grab the kernel RSS key. 5853 * If the kernel RSS interface is not available, this will fall back to our 5854 * pre-generated hash seed from ice_get_default_rss_key(). 5855 */ 5856 static int 5857 ice_set_rss_key(struct ice_vsi *vsi) 5858 { 5859 struct ice_aqc_get_set_rss_keys keydata = { .standard_rss_key = {0} }; 5860 struct ice_softc *sc = vsi->sc; 5861 struct ice_hw *hw = &sc->hw; 5862 enum ice_status status; 5863 5864 /* 5865 * If the RSS kernel interface is disabled, this will return the 5866 * default RSS key above. 5867 */ 5868 rss_getkey(keydata.standard_rss_key); 5869 5870 status = ice_aq_set_rss_key(hw, vsi->idx, &keydata); 5871 if (status) { 5872 device_printf(sc->dev, 5873 "ice_aq_set_rss_key status %s, error %s\n", 5874 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5875 return (EIO); 5876 } 5877 5878 return (0); 5879 } 5880 5881 /** 5882 * ice_set_rss_flow_flds - Program the RSS hash flows after package init 5883 * @vsi: the VSI to configure 5884 * 5885 * If the package file is initialized, the default RSS flows are reset. We 5886 * need to reprogram the expected hash configuration. We'll use 5887 * rss_gethashconfig() to determine which flows to enable. If RSS kernel 5888 * support is not enabled, this macro will fall back to suitable defaults. 
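 *
 * Each flow below repeats the same fill-configure-log pattern; a table-driven
 * sketch of one iteration (illustrative only, not the current code):
 *
 *	struct { u_int ht; u32 hdrs; u64 flds; } f =
 *	    { RSS_HASHTYPE_RSS_IPV4, ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4 };
 *	if (rss_hash_config & f.ht) {
 *		rss_cfg.addl_hdrs = f.hdrs;
 *		rss_cfg.hash_flds = f.flds;
 *		status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
 *	}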
5889 */ 5890 static void 5891 ice_set_rss_flow_flds(struct ice_vsi *vsi) 5892 { 5893 struct ice_softc *sc = vsi->sc; 5894 struct ice_hw *hw = &sc->hw; 5895 struct ice_rss_hash_cfg rss_cfg = { 0, 0, ICE_RSS_ANY_HEADERS, false }; 5896 device_t dev = sc->dev; 5897 enum ice_status status; 5898 u_int rss_hash_config; 5899 5900 rss_hash_config = rss_gethashconfig(); 5901 5902 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) { 5903 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4; 5904 rss_cfg.hash_flds = ICE_FLOW_HASH_IPV4; 5905 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5906 if (status) 5907 device_printf(dev, 5908 "ice_add_rss_cfg on VSI %d failed for ipv4 flow, err %s aq_err %s\n", 5909 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5910 } 5911 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) { 5912 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP; 5913 rss_cfg.hash_flds = ICE_HASH_TCP_IPV4; 5914 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5915 if (status) 5916 device_printf(dev, 5917 "ice_add_rss_cfg on VSI %d failed for tcp4 flow, err %s aq_err %s\n", 5918 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5919 } 5920 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) { 5921 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP; 5922 rss_cfg.hash_flds = ICE_HASH_UDP_IPV4; 5923 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5924 if (status) 5925 device_printf(dev, 5926 "ice_add_rss_cfg on VSI %d failed for udp4 flow, err %s aq_err %s\n", 5927 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5928 } 5929 if (rss_hash_config & (RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_IPV6_EX)) { 5930 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6; 5931 rss_cfg.hash_flds = ICE_FLOW_HASH_IPV6; 5932 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5933 if (status) 5934 device_printf(dev, 5935 "ice_add_rss_cfg on VSI %d failed for ipv6 flow, err %s aq_err %s\n", 5936 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5937 } 5938 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) { 5939 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP; 5940 rss_cfg.hash_flds = ICE_HASH_TCP_IPV6; 5941 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5942 if (status) 5943 device_printf(dev, 5944 "ice_add_rss_cfg on VSI %d failed for tcp6 flow, err %s aq_err %s\n", 5945 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5946 } 5947 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) { 5948 rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP; 5949 rss_cfg.hash_flds = ICE_HASH_UDP_IPV6; 5950 status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); 5951 if (status) 5952 device_printf(dev, 5953 "ice_add_rss_cfg on VSI %d failed for udp6 flow, err %s aq_err %s\n", 5954 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5955 } 5956 5957 /* Warn about RSS hash types which are not supported */ 5958 /* coverity[dead_error_condition] */ 5959 if (rss_hash_config & ~ICE_DEFAULT_RSS_HASH_CONFIG) { 5960 device_printf(dev, 5961 "ice_add_rss_cfg on VSI %d could not configure every requested hash type\n", 5962 vsi->idx); 5963 } 5964 } 5965 5966 /** 5967 * ice_set_rss_lut - Program the RSS lookup table for a VSI 5968 * @vsi: the VSI to configure 5969 * 5970 * Programs the RSS lookup table for a given VSI. We use 5971 * rss_get_indirection_to_bucket which will use the indirection table provided 5972 * by the kernel RSS interface when available. 
If the kernel RSS interface is 5973 * not available, we will fall back to a simple round-robin fashion queue 5974 * assignment. 5975 */ 5976 static int 5977 ice_set_rss_lut(struct ice_vsi *vsi) 5978 { 5979 struct ice_softc *sc = vsi->sc; 5980 struct ice_hw *hw = &sc->hw; 5981 device_t dev = sc->dev; 5982 struct ice_aq_get_set_rss_lut_params lut_params; 5983 enum ice_status status; 5984 int i, err = 0; 5985 u8 *lut; 5986 5987 lut = (u8 *)malloc(vsi->rss_table_size, M_ICE, M_NOWAIT|M_ZERO); 5988 if (!lut) { 5989 device_printf(dev, "Failed to allocate RSS lut memory\n"); 5990 return (ENOMEM); 5991 } 5992 5993 /* Populate the LUT with max no. of queues. If the RSS kernel 5994 * interface is disabled, this will assign the lookup table in 5995 * a simple round robin fashion 5996 */ 5997 for (i = 0; i < vsi->rss_table_size; i++) { 5998 /* XXX: this needs to be changed if num_rx_queues ever counts 5999 * more than just the RSS queues */ 6000 lut[i] = rss_get_indirection_to_bucket(i) % vsi->num_rx_queues; 6001 } 6002 6003 lut_params.vsi_handle = vsi->idx; 6004 lut_params.lut_size = vsi->rss_table_size; 6005 lut_params.lut_type = vsi->rss_lut_type; 6006 lut_params.lut = lut; 6007 lut_params.global_lut_id = 0; 6008 status = ice_aq_set_rss_lut(hw, &lut_params); 6009 if (status) { 6010 device_printf(dev, 6011 "Cannot set RSS lut, err %s aq_err %s\n", 6012 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6013 err = (EIO); 6014 } 6015 6016 free(lut, M_ICE); 6017 return err; 6018 } 6019 6020 /** 6021 * ice_config_rss - Configure RSS for a VSI 6022 * @vsi: the VSI to configure 6023 * 6024 * If FEATURE_RSS is enabled, configures the RSS lookup table and hash key for 6025 * a given VSI. 6026 */ 6027 int 6028 ice_config_rss(struct ice_vsi *vsi) 6029 { 6030 int err; 6031 6032 /* Nothing to do, if RSS is not enabled */ 6033 if (!ice_is_bit_set(vsi->sc->feat_en, ICE_FEATURE_RSS)) 6034 return 0; 6035 6036 err = ice_set_rss_key(vsi); 6037 if (err) 6038 return err; 6039 6040 ice_set_rss_flow_flds(vsi); 6041 6042 return ice_set_rss_lut(vsi); 6043 } 6044 6045 /** 6046 * ice_log_pkg_init - Log a message about status of DDP initialization 6047 * @sc: the device softc pointer 6048 * @pkg_status: the status result of ice_copy_and_init_pkg 6049 * 6050 * Called by ice_load_pkg after an attempt to download the DDP package 6051 * contents to the device. Determines whether the download was successful or 6052 * not and logs an appropriate message for the system administrator. 6053 * 6054 * @post if a DDP package was previously downloaded on another port and it 6055 * is not compatible with this driver, pkg_status will be updated to reflect 6056 * this, and the driver will transition to safe mode. 6057 */ 6058 void 6059 ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status) 6060 { 6061 struct ice_hw *hw = &sc->hw; 6062 device_t dev = sc->dev; 6063 struct sbuf *active_pkg, *os_pkg; 6064 6065 active_pkg = sbuf_new_auto(); 6066 ice_active_pkg_version_str(hw, active_pkg); 6067 sbuf_finish(active_pkg); 6068 6069 os_pkg = sbuf_new_auto(); 6070 ice_os_pkg_version_str(hw, os_pkg); 6071 sbuf_finish(os_pkg); 6072 6073 switch (*pkg_status) { 6074 case ICE_SUCCESS: 6075 /* The package download AdminQ command returned success because 6076 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is 6077 * already a package loaded on the device. 
6078 */ 6079 if (hw->pkg_ver.major == hw->active_pkg_ver.major && 6080 hw->pkg_ver.minor == hw->active_pkg_ver.minor && 6081 hw->pkg_ver.update == hw->active_pkg_ver.update && 6082 hw->pkg_ver.draft == hw->active_pkg_ver.draft && 6083 !memcmp(hw->pkg_name, hw->active_pkg_name, 6084 sizeof(hw->pkg_name))) { 6085 switch (hw->pkg_dwnld_status) { 6086 case ICE_AQ_RC_OK: 6087 device_printf(dev, 6088 "The DDP package was successfully loaded: %s.\n", 6089 sbuf_data(active_pkg)); 6090 break; 6091 case ICE_AQ_RC_EEXIST: 6092 device_printf(dev, 6093 "DDP package already present on device: %s.\n", 6094 sbuf_data(active_pkg)); 6095 break; 6096 default: 6097 /* We do not expect this to occur, but the 6098 * extra messaging is here in case something 6099 * changes in the ice_init_pkg flow. 6100 */ 6101 device_printf(dev, 6102 "DDP package already present on device: %s. An unexpected error occurred, pkg_dwnld_status %s.\n", 6103 sbuf_data(active_pkg), 6104 ice_aq_str(hw->pkg_dwnld_status)); 6105 break; 6106 } 6107 } else if (pkg_ver_compatible(&hw->active_pkg_ver) == 0) { 6108 device_printf(dev, 6109 "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n", 6110 sbuf_data(active_pkg), 6111 sbuf_data(os_pkg)); 6112 } else if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { 6113 device_printf(dev, 6114 "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6115 sbuf_data(active_pkg), 6116 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6117 *pkg_status = ICE_ERR_NOT_SUPPORTED; 6118 } else { 6119 device_printf(dev, 6120 "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6121 sbuf_data(active_pkg), 6122 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6123 *pkg_status = ICE_ERR_NOT_SUPPORTED; 6124 } 6125 break; 6126 case ICE_ERR_NOT_SUPPORTED: 6127 /* 6128 * This assumes that the active_pkg_ver will not be 6129 * initialized if the ice_ddp package version is not 6130 * supported. 6131 */ 6132 if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { 6133 /* The ice_ddp version is not supported */ 6134 if (pkg_ver_compatible(&hw->pkg_ver) > 0) { 6135 device_printf(dev, 6136 "The DDP package in the ice_ddp module is higher than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated driver. Entering Safe Mode.\n", 6137 sbuf_data(os_pkg), 6138 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6139 } else if (pkg_ver_compatible(&hw->pkg_ver) < 0) { 6140 device_printf(dev, 6141 "The DDP package in the ice_ddp module is lower than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated ice_ddp module. Entering Safe Mode.\n", 6142 sbuf_data(os_pkg), 6143 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6144 } else { 6145 device_printf(dev, 6146 "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. 
Entering Safe Mode.\n", 6147 ice_status_str(*pkg_status), 6148 ice_aq_str(hw->pkg_dwnld_status), 6149 sbuf_data(os_pkg), 6150 sbuf_data(active_pkg), 6151 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6152 } 6153 } else { 6154 if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { 6155 device_printf(dev, 6156 "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6157 sbuf_data(active_pkg), 6158 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6159 } else if (pkg_ver_compatible(&hw->active_pkg_ver) < 0) { 6160 device_printf(dev, 6161 "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6162 sbuf_data(active_pkg), 6163 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6164 } else { 6165 device_printf(dev, 6166 "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6167 ice_status_str(*pkg_status), 6168 ice_aq_str(hw->pkg_dwnld_status), 6169 sbuf_data(os_pkg), 6170 sbuf_data(active_pkg), 6171 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6172 } 6173 } 6174 break; 6175 case ICE_ERR_CFG: 6176 case ICE_ERR_BUF_TOO_SHORT: 6177 case ICE_ERR_PARAM: 6178 device_printf(dev, 6179 "The DDP package in the ice_ddp module is invalid. Entering Safe Mode\n"); 6180 break; 6181 case ICE_ERR_FW_DDP_MISMATCH: 6182 device_printf(dev, 6183 "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); 6184 break; 6185 case ICE_ERR_AQ_ERROR: 6186 switch (hw->pkg_dwnld_status) { 6187 case ICE_AQ_RC_ENOSEC: 6188 case ICE_AQ_RC_EBADSIG: 6189 device_printf(dev, 6190 "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n"); 6191 goto free_sbufs; 6192 case ICE_AQ_RC_ESVN: 6193 device_printf(dev, 6194 "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n"); 6195 goto free_sbufs; 6196 case ICE_AQ_RC_EBADMAN: 6197 case ICE_AQ_RC_EBADBUF: 6198 device_printf(dev, 6199 "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n"); 6200 goto free_sbufs; 6201 default: 6202 break; 6203 } 6204 /* fall-through */ 6205 default: 6206 device_printf(dev, 6207 "An unknown error (%s aq_err %s) occurred when loading the DDP package. Entering Safe Mode.\n", 6208 ice_status_str(*pkg_status), 6209 ice_aq_str(hw->pkg_dwnld_status)); 6210 break; 6211 } 6212 6213 free_sbufs: 6214 sbuf_delete(active_pkg); 6215 sbuf_delete(os_pkg); 6216 } 6217 6218 /** 6219 * ice_load_pkg_file - Load the DDP package file using firmware_get 6220 * @sc: device private softc 6221 * 6222 * Use firmware_get to load the DDP package memory and then request that 6223 * firmware download the package contents and program the relevant hardware 6224 * bits. 6225 * 6226 * This function makes a copy of the DDP package memory which is tracked in 6227 * the ice_hw structure. The copy will be managed and released by 6228 * ice_deinit_hw(). This allows the firmware reference to be immediately 6229 * released using firmware_put. 
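 *
 * If the package is wanted at boot, it can be preloaded from loader.conf, as
 * the failure message below suggests. A minimal /boot/loader.conf sketch:
 *
 *	ice_ddp_load="YES"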
6230 */ 6231 void 6232 ice_load_pkg_file(struct ice_softc *sc) 6233 { 6234 struct ice_hw *hw = &sc->hw; 6235 device_t dev = sc->dev; 6236 enum ice_status status; 6237 const struct firmware *pkg; 6238 6239 pkg = firmware_get("ice_ddp"); 6240 if (!pkg) { 6241 device_printf(dev, "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n"); 6242 if (cold) 6243 device_printf(dev, 6244 "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n"); 6245 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); 6246 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); 6247 return; 6248 } 6249 6250 /* Copy and download the pkg contents */ 6251 status = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize); 6252 6253 /* Release the firmware reference */ 6254 firmware_put(pkg, FIRMWARE_UNLOAD); 6255 6256 /* Check the active DDP package version and log a message */ 6257 ice_log_pkg_init(sc, &status); 6258 6259 /* Place the driver into safe mode */ 6260 if (status != ICE_SUCCESS) { 6261 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); 6262 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); 6263 } 6264 } 6265 6266 /** 6267 * ice_get_ifnet_counter - Retrieve counter value for a given ifnet counter 6268 * @vsi: the vsi to retrieve the value for 6269 * @counter: the counter type to retrieve 6270 * 6271 * Returns the value for a given ifnet counter. To do so, we calculate the 6272 * value based on the matching hardware statistics. 6273 */ 6274 uint64_t 6275 ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter) 6276 { 6277 struct ice_hw_port_stats *hs = &vsi->sc->stats.cur; 6278 struct ice_eth_stats *es = &vsi->hw_stats.cur; 6279 6280 /* For some statistics, especially those related to error flows, we do 6281 * not have per-VSI counters. In this case, we just report the global 6282 * counters. 6283 */ 6284 6285 switch (counter) { 6286 case IFCOUNTER_IPACKETS: 6287 return (es->rx_unicast + es->rx_multicast + es->rx_broadcast); 6288 case IFCOUNTER_IERRORS: 6289 return (hs->crc_errors + hs->illegal_bytes + 6290 hs->mac_local_faults + hs->mac_remote_faults + 6291 hs->rx_len_errors + hs->rx_undersize + 6292 hs->rx_oversize + hs->rx_fragments + hs->rx_jabber); 6293 case IFCOUNTER_OPACKETS: 6294 return (es->tx_unicast + es->tx_multicast + es->tx_broadcast); 6295 case IFCOUNTER_OERRORS: 6296 return (es->tx_errors); 6297 case IFCOUNTER_COLLISIONS: 6298 return (0); 6299 case IFCOUNTER_IBYTES: 6300 return (es->rx_bytes); 6301 case IFCOUNTER_OBYTES: 6302 return (es->tx_bytes); 6303 case IFCOUNTER_IMCASTS: 6304 return (es->rx_multicast); 6305 case IFCOUNTER_OMCASTS: 6306 return (es->tx_multicast); 6307 case IFCOUNTER_IQDROPS: 6308 return (es->rx_discards); 6309 case IFCOUNTER_OQDROPS: 6310 return (hs->tx_dropped_link_down); 6311 case IFCOUNTER_NOPROTO: 6312 return (es->rx_unknown_protocol); 6313 default: 6314 return if_get_counter_default(vsi->sc->ifp, counter); 6315 } 6316 } 6317 6318 /** 6319 * ice_save_pci_info - Save PCI configuration fields in HW struct 6320 * @hw: the ice_hw struct to save the PCI information in 6321 * @dev: the device to get the PCI information from 6322 * 6323 * This should only be called once, early in the device attach 6324 * process. 
 */
void
ice_save_pci_info(struct ice_hw *hw, device_t dev)
{
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);
}

/**
 * ice_replay_all_vsi_cfg - Replay configuration for all VSIs after reset
 * @sc: the device softc
 *
 * Replay the configuration for each VSI, and then cleanup replay
 * information. Called after a hardware reset in order to reconfigure the
 * active VSIs.
 */
int
ice_replay_all_vsi_cfg(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	enum ice_status status;
	int i;

	for (i = 0; i < sc->num_available_vsi; i++) {
		struct ice_vsi *vsi = sc->all_vsi[i];

		if (!vsi)
			continue;

		status = ice_replay_vsi(hw, vsi->idx);
		if (status) {
			device_printf(sc->dev, "Failed to replay VSI %d, err %s aq_err %s\n",
				      vsi->idx, ice_status_str(status),
				      ice_aq_str(hw->adminq.sq_last_status));
			return (EIO);
		}
	}

	/* Cleanup replay filters after successful reconfiguration */
	ice_replay_post(hw);
	return (0);
}

/**
 * ice_clean_vsi_rss_cfg - Cleanup RSS configuration for a given VSI
 * @vsi: pointer to the VSI structure
 *
 * Cleanup the advanced RSS configuration for a given VSI. This is necessary
 * during driver removal to ensure that all RSS resources are properly
 * released.
 *
 * @remark This function doesn't report an error as it is expected to be
 * called during driver reset and unload, and there isn't much the driver can
 * do if freeing RSS resources fails.
 */
static void
ice_clean_vsi_rss_cfg(struct ice_vsi *vsi)
{
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum ice_status status;

	status = ice_rem_vsi_rss_cfg(hw, vsi->idx);
	if (status)
		device_printf(dev,
			      "Failed to remove RSS configuration for VSI %d, err %s\n",
			      vsi->idx, ice_status_str(status));

	/* Remove this VSI from the RSS list */
	ice_rem_vsi_rss_list(hw, vsi->idx);
}

/**
 * ice_clean_all_vsi_rss_cfg - Cleanup RSS configuration for all VSIs
 * @sc: the device softc pointer
 *
 * Cleanup the advanced RSS configuration for all VSIs on a given PF
 * interface.
 *
 * @remark This should be called while preparing for a reset, to cleanup stale
 * RSS configuration for all VSIs.
 */
void
ice_clean_all_vsi_rss_cfg(struct ice_softc *sc)
{
	int i;

	/* No need to cleanup if RSS is not enabled */
	if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS))
		return;

	for (i = 0; i < sc->num_available_vsi; i++) {
		struct ice_vsi *vsi = sc->all_vsi[i];

		if (vsi)
			ice_clean_vsi_rss_cfg(vsi);
	}
}

/**
 * ice_requested_fec_mode - Return the requested FEC mode as a string
 * @pi: The port info structure
 *
 * Return a string representing the requested FEC mode.
 */
static const char *
ice_requested_fec_mode(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	enum ice_status status;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
				     &pcaps, NULL);
	if (status)
		/* Just report unknown if we can't get capabilities */
		return "Unknown";

	/* Check if RS-FEC has been requested first */
	if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
				      ICE_AQC_PHY_FEC_25G_RS_544_REQ))
		return ice_fec_str(ICE_FEC_RS);

	/* If RS FEC has not been requested, then check BASE-R */
	if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
				      ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ice_fec_str(ICE_FEC_BASER);

	return ice_fec_str(ICE_FEC_NONE);
}

/**
 * ice_negotiated_fec_mode - Return the negotiated FEC mode as a string
 * @pi: The port info structure
 *
 * Return a string representing the current FEC mode.
 */
static const char *
ice_negotiated_fec_mode(struct ice_port_info *pi)
{
	/* First, check whether RS FEC has been negotiated */
	if (pi->phy.link_info.fec_info & (ICE_AQ_LINK_25G_RS_528_FEC_EN |
					  ICE_AQ_LINK_25G_RS_544_FEC_EN))
		return ice_fec_str(ICE_FEC_RS);

	/* If RS FEC is not in use, then check BASE-R */
	if (pi->phy.link_info.fec_info & ICE_AQ_LINK_25G_KR_FEC_EN)
		return ice_fec_str(ICE_FEC_BASER);

	return ice_fec_str(ICE_FEC_NONE);
}

/**
 * ice_autoneg_mode - Return string indicating whether autoneg completed
 * @pi: The port info structure
 *
 * Return "True" if autonegotiation is completed, "False" otherwise.
 */
static const char *
ice_autoneg_mode(struct ice_port_info *pi)
{
	if (pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		return "True";
	else
		return "False";
}

/**
 * ice_flowcontrol_mode - Return string indicating the Flow Control mode
 * @pi: The port info structure
 *
 * Returns the current Flow Control mode as a string.
 */
static const char *
ice_flowcontrol_mode(struct ice_port_info *pi)
{
	return ice_fc_str(pi->fc.current_mode);
}

/**
 * ice_link_up_msg - Log a link up message with associated info
 * @sc: the device private softc
 *
 * Log a link up message with LOG_NOTICE message level. Include information
 * about the duplex, FEC mode, autonegotiation and flow control.
 */
void
ice_link_up_msg(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	struct ifnet *ifp = sc->ifp;
	const char *speed, *req_fec, *neg_fec, *autoneg, *flowcontrol;

	speed = ice_aq_speed_to_str(hw->port_info);
	req_fec = ice_requested_fec_mode(hw->port_info);
	neg_fec = ice_negotiated_fec_mode(hw->port_info);
	autoneg = ice_autoneg_mode(hw->port_info);
	flowcontrol = ice_flowcontrol_mode(hw->port_info);

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    ifp->if_xname, speed, req_fec, neg_fec, autoneg, flowcontrol);
}

/**
 * ice_update_laa_mac - Update MAC address if Locally Administered
 * @sc: the device softc
 *
 * Update the device MAC address when a Locally Administered Address is
 * assigned.
 *
 * This function does *not* update the MAC filter list itself.
Instead, it 6541 * should be called after ice_rm_pf_default_mac_filters, so that the previous 6542 * address filter will be removed, and before ice_cfg_pf_default_mac_filters, 6543 * so that the new address filter will be assigned. 6544 */ 6545 int 6546 ice_update_laa_mac(struct ice_softc *sc) 6547 { 6548 const u8 *lladdr = (const u8 *)IF_LLADDR(sc->ifp); 6549 struct ice_hw *hw = &sc->hw; 6550 enum ice_status status; 6551 6552 /* If the address is the same, then there is nothing to update */ 6553 if (!memcmp(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN)) 6554 return (0); 6555 6556 /* Reject Multicast addresses */ 6557 if (ETHER_IS_MULTICAST(lladdr)) 6558 return (EINVAL); 6559 6560 status = ice_aq_manage_mac_write(hw, lladdr, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL); 6561 if (status) { 6562 device_printf(sc->dev, "Failed to write mac %6D to firmware, err %s aq_err %s\n", 6563 lladdr, ":", ice_status_str(status), 6564 ice_aq_str(hw->adminq.sq_last_status)); 6565 return (EFAULT); 6566 } 6567 6568 /* Copy the address into place of the LAN address. */ 6569 bcopy(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN); 6570 6571 return (0); 6572 } 6573 6574 /** 6575 * ice_get_and_print_bus_info - Save (PCI) bus info and print messages 6576 * @sc: device softc 6577 * 6578 * This will potentially print out a warning message if bus bandwidth 6579 * is insufficient for full-speed operation. 6580 * 6581 * This should only be called once, during the attach process, after 6582 * hw->port_info has been filled out with port link topology information 6583 * (from the Get PHY Capabilities Admin Queue command). 6584 */ 6585 void 6586 ice_get_and_print_bus_info(struct ice_softc *sc) 6587 { 6588 struct ice_hw *hw = &sc->hw; 6589 device_t dev = sc->dev; 6590 u16 pci_link_status; 6591 int offset; 6592 6593 pci_find_cap(dev, PCIY_EXPRESS, &offset); 6594 pci_link_status = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 6595 6596 /* Fill out hw struct with PCIE link status info */ 6597 ice_set_pci_link_status_data(hw, pci_link_status); 6598 6599 /* Use info to print out bandwidth messages */ 6600 ice_print_bus_link_data(dev, hw); 6601 6602 if (ice_pcie_bandwidth_check(sc)) { 6603 device_printf(dev, 6604 "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 6605 device_printf(dev, 6606 "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 6607 } 6608 } 6609 6610 /** 6611 * ice_pcie_bus_speed_to_rate - Convert driver bus speed enum value to 6612 * a 64-bit baudrate. 6613 * @speed: enum value to convert 6614 * 6615 * This only goes up to PCIE Gen 4. 6616 */ 6617 static uint64_t 6618 ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed) 6619 { 6620 /* If the PCI-E speed is Gen1 or Gen2, then report 6621 * only 80% of bus speed to account for encoding overhead. 6622 */ 6623 switch (speed) { 6624 case ice_pcie_speed_2_5GT: 6625 return IF_Gbps(2); 6626 case ice_pcie_speed_5_0GT: 6627 return IF_Gbps(4); 6628 case ice_pcie_speed_8_0GT: 6629 return IF_Gbps(8); 6630 case ice_pcie_speed_16_0GT: 6631 return IF_Gbps(16); 6632 case ice_pcie_speed_unknown: 6633 default: 6634 return 0; 6635 } 6636 } 6637 6638 /** 6639 * ice_pcie_lnk_width_to_int - Convert driver pci-e width enum value to 6640 * a 32-bit number. 
6641 * @width: enum value to convert 6642 */ 6643 static int 6644 ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width) 6645 { 6646 switch (width) { 6647 case ice_pcie_lnk_x1: 6648 return (1); 6649 case ice_pcie_lnk_x2: 6650 return (2); 6651 case ice_pcie_lnk_x4: 6652 return (4); 6653 case ice_pcie_lnk_x8: 6654 return (8); 6655 case ice_pcie_lnk_x12: 6656 return (12); 6657 case ice_pcie_lnk_x16: 6658 return (16); 6659 case ice_pcie_lnk_x32: 6660 return (32); 6661 case ice_pcie_lnk_width_resrv: 6662 case ice_pcie_lnk_width_unknown: 6663 default: 6664 return (0); 6665 } 6666 } 6667 6668 /** 6669 * ice_pcie_bandwidth_check - Check if PCI-E bandwidth is sufficient for 6670 * full-speed device operation. 6671 * @sc: adapter softc 6672 * 6673 * Returns 0 if sufficient; 1 if not. 6674 */ 6675 static uint8_t 6676 ice_pcie_bandwidth_check(struct ice_softc *sc) 6677 { 6678 struct ice_hw *hw = &sc->hw; 6679 int num_ports, pcie_width; 6680 u64 pcie_speed, port_speed; 6681 6682 MPASS(hw->port_info); 6683 6684 num_ports = bitcount32(hw->func_caps.common_cap.valid_functions); 6685 port_speed = ice_phy_types_to_max_rate(hw->port_info); 6686 pcie_speed = ice_pcie_bus_speed_to_rate(hw->bus.speed); 6687 pcie_width = ice_pcie_lnk_width_to_int(hw->bus.width); 6688 6689 /* 6690 * If 2x100, clamp ports to 1 -- 2nd port is intended for 6691 * failover. 6692 */ 6693 if (port_speed == IF_Gbps(100)) 6694 num_ports = 1; 6695 6696 return !!((num_ports * port_speed) > pcie_speed * pcie_width); 6697 } 6698 6699 /** 6700 * ice_print_bus_link_data - Print PCI-E bandwidth information 6701 * @dev: device to print string for 6702 * @hw: hw struct with PCI-e link information 6703 */ 6704 static void 6705 ice_print_bus_link_data(device_t dev, struct ice_hw *hw) 6706 { 6707 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 6708 ((hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" : 6709 (hw->bus.speed == ice_pcie_speed_8_0GT) ? "8.0GT/s" : 6710 (hw->bus.speed == ice_pcie_speed_5_0GT) ? "5.0GT/s" : 6711 (hw->bus.speed == ice_pcie_speed_2_5GT) ? "2.5GT/s" : "Unknown"), 6712 (hw->bus.width == ice_pcie_lnk_x32) ? "Width x32" : 6713 (hw->bus.width == ice_pcie_lnk_x16) ? "Width x16" : 6714 (hw->bus.width == ice_pcie_lnk_x12) ? "Width x12" : 6715 (hw->bus.width == ice_pcie_lnk_x8) ? "Width x8" : 6716 (hw->bus.width == ice_pcie_lnk_x4) ? "Width x4" : 6717 (hw->bus.width == ice_pcie_lnk_x2) ? "Width x2" : 6718 (hw->bus.width == ice_pcie_lnk_x1) ? 
"Width x1" : "Width Unknown"); 6719 } 6720 6721 /** 6722 * ice_set_pci_link_status_data - store PCI bus info 6723 * @hw: pointer to hardware structure 6724 * @link_status: the link status word from PCI config space 6725 * 6726 * Stores the PCI bus info (speed, width, type) within the ice_hw structure 6727 **/ 6728 static void 6729 ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status) 6730 { 6731 u16 reg; 6732 6733 hw->bus.type = ice_bus_pci_express; 6734 6735 reg = (link_status & PCIEM_LINK_STA_WIDTH) >> 4; 6736 6737 switch (reg) { 6738 case ice_pcie_lnk_x1: 6739 case ice_pcie_lnk_x2: 6740 case ice_pcie_lnk_x4: 6741 case ice_pcie_lnk_x8: 6742 case ice_pcie_lnk_x12: 6743 case ice_pcie_lnk_x16: 6744 case ice_pcie_lnk_x32: 6745 hw->bus.width = (enum ice_pcie_link_width)reg; 6746 break; 6747 default: 6748 hw->bus.width = ice_pcie_lnk_width_unknown; 6749 break; 6750 } 6751 6752 reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x13; 6753 6754 switch (reg) { 6755 case ice_pcie_speed_2_5GT: 6756 case ice_pcie_speed_5_0GT: 6757 case ice_pcie_speed_8_0GT: 6758 case ice_pcie_speed_16_0GT: 6759 hw->bus.speed = (enum ice_pcie_bus_speed)reg; 6760 break; 6761 default: 6762 hw->bus.speed = ice_pcie_speed_unknown; 6763 break; 6764 } 6765 } 6766 6767 /** 6768 * ice_init_link_events - Initialize Link Status Events mask 6769 * @sc: the device softc 6770 * 6771 * Initialize the Link Status Events mask to disable notification of link 6772 * events we don't care about in software. Also request that link status 6773 * events be enabled. 6774 */ 6775 int 6776 ice_init_link_events(struct ice_softc *sc) 6777 { 6778 struct ice_hw *hw = &sc->hw; 6779 enum ice_status status; 6780 u16 wanted_events; 6781 6782 /* Set the bits for the events that we want to be notified by */ 6783 wanted_events = (ICE_AQ_LINK_EVENT_UPDOWN | 6784 ICE_AQ_LINK_EVENT_MEDIA_NA | 6785 ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL); 6786 6787 /* request that every event except the wanted events be masked */ 6788 status = ice_aq_set_event_mask(hw, hw->port_info->lport, ~wanted_events, NULL); 6789 if (status) { 6790 device_printf(sc->dev, 6791 "Failed to set link status event mask, err %s aq_err %s\n", 6792 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6793 return (EIO); 6794 } 6795 6796 /* Request link info with the LSE bit set to enable link status events */ 6797 status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL); 6798 if (status) { 6799 device_printf(sc->dev, 6800 "Failed to enable link status events, err %s aq_err %s\n", 6801 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6802 return (EIO); 6803 } 6804 6805 return (0); 6806 } 6807 6808 /** 6809 * ice_handle_mdd_event - Handle possibly malicious events 6810 * @sc: the device softc 6811 * 6812 * Called by the admin task if an MDD detection interrupt is triggered. 6813 * Identifies possibly malicious events coming from VFs. Also triggers for 6814 * similar incorrect behavior from the PF as well. 
6815 */ 6816 void 6817 ice_handle_mdd_event(struct ice_softc *sc) 6818 { 6819 struct ice_hw *hw = &sc->hw; 6820 bool mdd_detected = false, request_reinit = false; 6821 device_t dev = sc->dev; 6822 u32 reg; 6823 6824 if (!ice_testandclear_state(&sc->state, ICE_STATE_MDD_PENDING)) 6825 return; 6826 6827 reg = rd32(hw, GL_MDET_TX_TCLAN); 6828 if (reg & GL_MDET_TX_TCLAN_VALID_M) { 6829 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S; 6830 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> GL_MDET_TX_TCLAN_VF_NUM_S; 6831 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> GL_MDET_TX_TCLAN_MAL_TYPE_S; 6832 u16 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S; 6833 6834 device_printf(dev, "Malicious Driver Detection Tx Descriptor check event '%s' on Tx queue %u PF# %u VF# %u\n", 6835 ice_mdd_tx_tclan_str(event), queue, pf_num, vf_num); 6836 6837 /* Only clear this event if it matches this PF, that way other 6838 * PFs can read the event and determine VF and queue number. 6839 */ 6840 if (pf_num == hw->pf_id) 6841 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); 6842 6843 mdd_detected = true; 6844 } 6845 6846 /* Determine what triggered the MDD event */ 6847 reg = rd32(hw, GL_MDET_TX_PQM); 6848 if (reg & GL_MDET_TX_PQM_VALID_M) { 6849 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> GL_MDET_TX_PQM_PF_NUM_S; 6850 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> GL_MDET_TX_PQM_VF_NUM_S; 6851 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> GL_MDET_TX_PQM_MAL_TYPE_S; 6852 u16 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> GL_MDET_TX_PQM_QNUM_S; 6853 6854 device_printf(dev, "Malicious Driver Detection Tx Quanta check event '%s' on Tx queue %u PF# %u VF# %u\n", 6855 ice_mdd_tx_pqm_str(event), queue, pf_num, vf_num); 6856 6857 /* Only clear this event if it matches this PF, that way other 6858 * PFs can read the event and determine VF and queue number. 6859 */ 6860 if (pf_num == hw->pf_id) 6861 wr32(hw, GL_MDET_TX_PQM, 0xffffffff); 6862 6863 mdd_detected = true; 6864 } 6865 6866 reg = rd32(hw, GL_MDET_RX); 6867 if (reg & GL_MDET_RX_VALID_M) { 6868 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> GL_MDET_RX_PF_NUM_S; 6869 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> GL_MDET_RX_VF_NUM_S; 6870 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> GL_MDET_RX_MAL_TYPE_S; 6871 u16 queue = (reg & GL_MDET_RX_QNUM_M) >> GL_MDET_RX_QNUM_S; 6872 6873 device_printf(dev, "Malicious Driver Detection Rx event '%s' on Rx queue %u PF# %u VF# %u\n", 6874 ice_mdd_rx_str(event), queue, pf_num, vf_num); 6875 6876 /* Only clear this event if it matches this PF, that way other 6877 * PFs can read the event and determine VF and queue number. 6878 */ 6879 if (pf_num == hw->pf_id) 6880 wr32(hw, GL_MDET_RX, 0xffffffff); 6881 6882 mdd_detected = true; 6883 } 6884 6885 /* Now, confirm that this event actually affects this PF, by checking 6886 * the PF registers. 
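	 *
	 * (The per-PF counters bumped below are also visible through the
	 * debug sysctls added in ice_add_debug_sysctls(), e.g., for a
	 * hypothetical unit 0: dev.ice.0.debug.tx_mdd_count and
	 * dev.ice.0.debug.rx_mdd_count.)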
	 */
	if (mdd_detected) {
		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xffff);
			sc->soft_stats.tx_mdd_count++;
			request_reinit = true;
		}

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xffff);
			sc->soft_stats.tx_mdd_count++;
			request_reinit = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xffff);
			sc->soft_stats.rx_mdd_count++;
			request_reinit = true;
		}
	}

	/* TODO: Implement logic to detect and handle events caused by VFs. */

	/* request that the upper stack re-initialize the Tx/Rx queues */
	if (request_reinit)
		ice_request_stack_reinit(sc);

	ice_flush(hw);
}

/**
 * ice_init_dcb_setup - Initialize DCB settings for HW
 * @sc: the device softc
 *
 * This needs to be called after the fw_lldp_agent sysctl is added, since that
 * can update the device's LLDP agent status if a tunable value is set.
 *
 * Get and store the initial state of DCB settings on driver load. Print out
 * informational messages as well.
 */
void
ice_init_dcb_setup(struct ice_softc *sc)
{
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	bool dcbx_agent_status;
	enum ice_status status;

	/* Don't do anything if DCB isn't supported */
	if (!hw->func_caps.common_cap.dcb) {
		device_printf(dev, "%s: No DCB support\n", __func__);
		return;
	}

	hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw);
	if (hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_DONE &&
	    hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
		/*
		 * Start DCBX agent, but not LLDP. The return value isn't
		 * checked here because a more detailed dcbx agent status is
		 * retrieved and checked in ice_init_dcb() and below.
		 */
		ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL);
	}

	/* This sets hw->port_info->qos_cfg.is_sw_lldp */
	status = ice_init_dcb(hw, true);

	/* If there is an error, then FW LLDP is not in a usable state */
	if (status != 0 && status != ICE_ERR_NOT_READY) {
		/* Don't print an error message if the return code from the AQ
		 * cmd performed in ice_init_dcb() is EPERM; that means the
		 * FW LLDP engine is disabled, and that is a valid state.
6964 */ 6965 if (!(status == ICE_ERR_AQ_ERROR && 6966 hw->adminq.sq_last_status == ICE_AQ_RC_EPERM)) { 6967 device_printf(dev, "DCB init failed, err %s aq_err %s\n", 6968 ice_status_str(status), 6969 ice_aq_str(hw->adminq.sq_last_status)); 6970 } 6971 hw->port_info->qos_cfg.dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; 6972 } 6973 6974 switch (hw->port_info->qos_cfg.dcbx_status) { 6975 case ICE_DCBX_STATUS_DIS: 6976 ice_debug(hw, ICE_DBG_DCB, "DCBX disabled\n"); 6977 break; 6978 case ICE_DCBX_STATUS_NOT_STARTED: 6979 ice_debug(hw, ICE_DBG_DCB, "DCBX not started\n"); 6980 break; 6981 case ICE_DCBX_STATUS_MULTIPLE_PEERS: 6982 ice_debug(hw, ICE_DBG_DCB, "DCBX detected multiple peers\n"); 6983 break; 6984 default: 6985 break; 6986 } 6987 6988 /* LLDP disabled in FW */ 6989 if (hw->port_info->qos_cfg.is_sw_lldp) { 6990 ice_add_rx_lldp_filter(sc); 6991 device_printf(dev, "Firmware LLDP agent disabled\n"); 6992 } 6993 } 6994 6995 /** 6996 * ice_handle_mib_change_event - helper function to log LLDP MIB change events 6997 * @sc: device softc 6998 * @event: event received on a control queue 6999 * 7000 * Prints out the type of an LLDP MIB change event in a DCB debug message. 7001 * 7002 * XXX: Should be extended to do more if the driver decides to notify other SW 7003 * of LLDP MIB changes, or needs to extract info from the MIB. 7004 */ 7005 static void 7006 ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event) 7007 { 7008 struct ice_aqc_lldp_get_mib *params = 7009 (struct ice_aqc_lldp_get_mib *)&event->desc.params.lldp_get_mib; 7010 u8 mib_type, bridge_type, tx_status; 7011 7012 /* XXX: To get the contents of the MIB that caused the event, set the 7013 * ICE_DBG_AQ debug mask and read that output 7014 */ 7015 static const char* mib_type_strings[] = { 7016 "Local MIB", 7017 "Remote MIB", 7018 "Reserved", 7019 "Reserved" 7020 }; 7021 static const char* bridge_type_strings[] = { 7022 "Nearest Bridge", 7023 "Non-TPMR Bridge", 7024 "Reserved", 7025 "Reserved" 7026 }; 7027 static const char* tx_status_strings[] = { 7028 "Port's TX active", 7029 "Port's TX suspended and drained", 7030 "Reserved", 7031 "Port's TX suspended and drained; blocked TC pipe flushed" 7032 }; 7033 7034 mib_type = (params->type & ICE_AQ_LLDP_MIB_TYPE_M) >> 7035 ICE_AQ_LLDP_MIB_TYPE_S; 7036 bridge_type = (params->type & ICE_AQ_LLDP_BRID_TYPE_M) >> 7037 ICE_AQ_LLDP_BRID_TYPE_S; 7038 tx_status = (params->type & ICE_AQ_LLDP_TX_M) >> 7039 ICE_AQ_LLDP_TX_S; 7040 7041 ice_debug(&sc->hw, ICE_DBG_DCB, "LLDP MIB Change Event (%s, %s, %s)\n", 7042 mib_type_strings[mib_type], bridge_type_strings[bridge_type], 7043 tx_status_strings[tx_status]); 7044 } 7045 7046 /** 7047 * ice_send_version - Send driver version to firmware 7048 * @sc: the device private softc 7049 * 7050 * Send the driver version to the firmware. This must be called as early as 7051 * possible after ice_init_hw().
7052 */ 7053 int 7054 ice_send_version(struct ice_softc *sc) 7055 { 7056 struct ice_driver_ver driver_version = {0}; 7057 struct ice_hw *hw = &sc->hw; 7058 device_t dev = sc->dev; 7059 enum ice_status status; 7060 7061 driver_version.major_ver = ice_major_version; 7062 driver_version.minor_ver = ice_minor_version; 7063 driver_version.build_ver = ice_patch_version; 7064 driver_version.subbuild_ver = ice_rc_version; 7065 7066 strlcpy((char *)driver_version.driver_string, ice_driver_version, 7067 sizeof(driver_version.driver_string)); 7068 7069 status = ice_aq_send_driver_ver(hw, &driver_version, NULL); 7070 if (status) { 7071 device_printf(dev, "Unable to send driver version to firmware, err %s aq_err %s\n", 7072 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 7073 return (EIO); 7074 } 7075 7076 return (0); 7077 } 7078 7079 /** 7080 * ice_handle_lan_overflow_event - helper function to log LAN overflow events 7081 * @sc: device softc 7082 * @event: event received on a control queue 7083 * 7084 * Prints out a message when a LAN overflow event is detected on a receive 7085 * queue. 7086 */ 7087 static void 7088 ice_handle_lan_overflow_event(struct ice_softc *sc, struct ice_rq_event_info *event) 7089 { 7090 struct ice_aqc_event_lan_overflow *params = 7091 (struct ice_aqc_event_lan_overflow *)&event->desc.params.lan_overflow; 7092 struct ice_hw *hw = &sc->hw; 7093 7094 ice_debug(hw, ICE_DBG_DCB, "LAN overflow event detected, prtdcb_ruptq=0x%08x, qtx_ctl=0x%08x\n", 7095 LE32_TO_CPU(params->prtdcb_ruptq), 7096 LE32_TO_CPU(params->qtx_ctl)); 7097 } 7098 7099 /** 7100 * ice_add_ethertype_to_list - Add an Ethertype filter to a filter list 7101 * @vsi: the VSI to target packets to 7102 * @list: the list to add the filter to 7103 * @ethertype: the Ethertype to filter on 7104 * @direction: The direction of the filter (Tx or Rx) 7105 * @action: the action to take 7106 * 7107 * Add an Ethertype filter to a filter list. Used to forward a series of 7108 * filters to the firmware for configuring the switch. 7109 * 7110 * Returns 0 on success, and an error code on failure. 7111 */ 7112 static int 7113 ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 7114 u16 ethertype, u16 direction, 7115 enum ice_sw_fwd_act_type action) 7116 { 7117 struct ice_fltr_list_entry *entry; 7118 7119 MPASS((direction == ICE_FLTR_TX) || (direction == ICE_FLTR_RX)); 7120 7121 entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); 7122 if (!entry) 7123 return (ENOMEM); 7124 7125 entry->fltr_info.flag = direction; 7126 entry->fltr_info.src_id = ICE_SRC_ID_VSI; 7127 entry->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 7128 entry->fltr_info.fltr_act = action; 7129 entry->fltr_info.vsi_handle = vsi->idx; 7130 entry->fltr_info.l_data.ethertype_mac.ethertype = ethertype; 7131 7132 LIST_ADD(&entry->list_entry, list); 7133 7134 return 0; 7135 } 7136 7137 #define ETHERTYPE_PAUSE_FRAMES 0x8808 7138 #define ETHERTYPE_LLDP_FRAMES 0x88cc 7139 7140 /** 7141 * ice_cfg_pf_ethertype_filters - Configure switch to drop ethertypes 7142 * @sc: the device private softc 7143 * 7144 * Configure the switch to drop PAUSE frames and LLDP frames transmitted from 7145 * the host. This prevents malicious VFs from sending these frames and being 7146 * able to control or configure the network. 
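* The Ethertypes involved are defined just above: 0x8808 for PAUSE frames and 0x88cc for LLDP. Whether each drop filter is actually installed is gated by the enable_tx_fc_filter and enable_tx_lldp_filter fields in the softc.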
7147 */ 7148 int 7149 ice_cfg_pf_ethertype_filters(struct ice_softc *sc) 7150 { 7151 struct ice_list_head ethertype_list; 7152 struct ice_vsi *vsi = &sc->pf_vsi; 7153 struct ice_hw *hw = &sc->hw; 7154 device_t dev = sc->dev; 7155 enum ice_status status; 7156 int err = 0; 7157 7158 INIT_LIST_HEAD(&ethertype_list); 7159 7160 /* 7161 * Note that the switch filters will ignore the VSI index for the drop 7162 * action, so we only need to program drop filters once for the main 7163 * VSI. 7164 */ 7165 7166 /* Configure switch to drop all Tx pause frames coming from any VSI. */ 7167 if (sc->enable_tx_fc_filter) { 7168 err = ice_add_ethertype_to_list(vsi, &ethertype_list, 7169 ETHERTYPE_PAUSE_FRAMES, 7170 ICE_FLTR_TX, ICE_DROP_PACKET); 7171 if (err) 7172 goto free_ethertype_list; 7173 } 7174 7175 /* Configure switch to drop LLDP frames coming from any VSI */ 7176 if (sc->enable_tx_lldp_filter) { 7177 err = ice_add_ethertype_to_list(vsi, &ethertype_list, 7178 ETHERTYPE_LLDP_FRAMES, 7179 ICE_FLTR_TX, ICE_DROP_PACKET); 7180 if (err) 7181 goto free_ethertype_list; 7182 } 7183 7184 status = ice_add_eth_mac(hw, &ethertype_list); 7185 if (status) { 7186 device_printf(dev, 7187 "Failed to add Tx Ethertype filters, err %s aq_err %s\n", 7188 ice_status_str(status), 7189 ice_aq_str(hw->adminq.sq_last_status)); 7190 err = (EIO); 7191 } 7192 7193 free_ethertype_list: 7194 ice_free_fltr_list(&ethertype_list); 7195 return err; 7196 } 7197 7198 /** 7199 * ice_add_rx_lldp_filter - add ethertype filter for Rx LLDP frames 7200 * @sc: the device private structure 7201 * 7202 * Add a switch ethertype filter which forwards the LLDP frames to the main PF 7203 * VSI. Called when the fw_lldp_agent is disabled, to allow the LLDP frames to 7204 * be forwarded to the stack. 7205 */ 7206 static void 7207 ice_add_rx_lldp_filter(struct ice_softc *sc) 7208 { 7209 struct ice_list_head ethertype_list; 7210 struct ice_vsi *vsi = &sc->pf_vsi; 7211 struct ice_hw *hw = &sc->hw; 7212 device_t dev = sc->dev; 7213 enum ice_status status; 7214 int err; 7215 u16 vsi_num; 7216 7217 /* 7218 * If FW is new enough, use a direct AQ command to perform the filter 7219 * addition. 7220 */ 7221 if (ice_fw_supports_lldp_fltr_ctrl(hw)) { 7222 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); 7223 status = ice_lldp_fltr_add_remove(hw, vsi_num, true); 7224 if (status) { 7225 device_printf(dev, 7226 "Failed to add Rx LLDP filter, err %s aq_err %s\n", 7227 ice_status_str(status), 7228 ice_aq_str(hw->adminq.sq_last_status)); 7229 } else 7230 ice_set_state(&sc->state, 7231 ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER); 7232 return; 7233 } 7234 7235 INIT_LIST_HEAD(&ethertype_list); 7236 7237 /* Forward Rx LLDP frames to the stack */ 7238 err = ice_add_ethertype_to_list(vsi, &ethertype_list, 7239 ETHERTYPE_LLDP_FRAMES, 7240 ICE_FLTR_RX, ICE_FWD_TO_VSI); 7241 if (err) { 7242 device_printf(dev, 7243 "Failed to add Rx LLDP filter, err %s\n", 7244 ice_err_str(err)); 7245 goto free_ethertype_list; 7246 } 7247 7248 status = ice_add_eth_mac(hw, &ethertype_list); 7249 if (status && status != ICE_ERR_ALREADY_EXISTS) { 7250 device_printf(dev, 7251 "Failed to add Rx LLDP filter, err %s aq_err %s\n", 7252 ice_status_str(status), 7253 ice_aq_str(hw->adminq.sq_last_status)); 7254 } else { 7255 /* 7256 * If status == ICE_ERR_ALREADY_EXISTS, we won't treat an 7257 * already existing filter as an error case.
7258 */ 7259 ice_set_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER); 7260 } 7261 7262 free_ethertype_list: 7263 ice_free_fltr_list(&ethertype_list); 7264 } 7265 7266 /** 7267 * ice_del_rx_lldp_filter - Remove ethertype filter for Rx LLDP frames 7268 * @sc: the device private structure 7269 * 7270 * Remove the switch filter forwarding LLDP frames to the main PF VSI, called 7271 * when the firmware LLDP agent is enabled, to stop routing LLDP frames to the 7272 * stack. 7273 */ 7274 static void 7275 ice_del_rx_lldp_filter(struct ice_softc *sc) 7276 { 7277 struct ice_list_head ethertype_list; 7278 struct ice_vsi *vsi = &sc->pf_vsi; 7279 struct ice_hw *hw = &sc->hw; 7280 device_t dev = sc->dev; 7281 enum ice_status status; 7282 int err; 7283 u16 vsi_num; 7284 7285 /* 7286 * Only in the scenario where the driver added the filter during 7287 * this session (while the driver was loaded) would we be able to 7288 * delete this filter. 7289 */ 7290 if (!ice_test_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER)) 7291 return; 7292 7293 /* 7294 * If FW is new enough, use a direct AQ command to perform the filter 7295 * removal. 7296 */ 7297 if (ice_fw_supports_lldp_fltr_ctrl(hw)) { 7298 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); 7299 status = ice_lldp_fltr_add_remove(hw, vsi_num, false); 7300 if (status) { 7301 device_printf(dev, 7302 "Failed to remove Rx LLDP filter, err %s aq_err %s\n", 7303 ice_status_str(status), 7304 ice_aq_str(hw->adminq.sq_last_status)); 7305 } 7306 return; 7307 } 7308 7309 INIT_LIST_HEAD(&ethertype_list); 7310 7311 /* Remove filter forwarding Rx LLDP frames to the stack */ 7312 err = ice_add_ethertype_to_list(vsi, &ethertype_list, 7313 ETHERTYPE_LLDP_FRAMES, 7314 ICE_FLTR_RX, ICE_FWD_TO_VSI); 7315 if (err) { 7316 device_printf(dev, 7317 "Failed to remove Rx LLDP filter, err %s\n", 7318 ice_err_str(err)); 7319 goto free_ethertype_list; 7320 } 7321 7322 status = ice_remove_eth_mac(hw, &ethertype_list); 7323 if (status == ICE_ERR_DOES_NOT_EXIST) { 7324 ; /* Don't complain if we try to remove a filter that doesn't exist */ 7325 } else if (status) { 7326 device_printf(dev, 7327 "Failed to remove Rx LLDP filter, err %s aq_err %s\n", 7328 ice_status_str(status), 7329 ice_aq_str(hw->adminq.sq_last_status)); 7330 } 7331 7332 free_ethertype_list: 7333 ice_free_fltr_list(&ethertype_list); 7334 } 7335 7336 /** 7337 * ice_init_link_configuration -- Setup link in different ways depending 7338 * on whether media is available or not. 7339 * @sc: device private structure 7340 * 7341 * Called at the end of the attach process to either set default link 7342 * parameters if there is media available, or force HW link down and 7343 * set a state bit if there is no media. 7344 */ 7345 void 7346 ice_init_link_configuration(struct ice_softc *sc) 7347 { 7348 struct ice_port_info *pi = sc->hw.port_info; 7349 struct ice_hw *hw = &sc->hw; 7350 device_t dev = sc->dev; 7351 enum ice_status status; 7352 7353 pi->phy.get_link_info = true; 7354 status = ice_get_link_status(pi, &sc->link_up); 7355 if (status != ICE_SUCCESS) { 7356 device_printf(dev, 7357 "%s: ice_get_link_status failed; status %s, aq_err %s\n", 7358 __func__, ice_status_str(status), 7359 ice_aq_str(hw->adminq.sq_last_status)); 7360 return; 7361 } 7362 7363 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 7364 ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA); 7365 /* Apply default link settings */ 7366 ice_apply_saved_phy_cfg(sc); 7367 } else { 7368 /* Set link down, and poll for media available in timer.
This prevents the 7369 * driver from receiving spurious link-related events. 7370 */ 7371 ice_set_state(&sc->state, ICE_STATE_NO_MEDIA); 7372 status = ice_aq_set_link_restart_an(pi, false, NULL); 7373 if (status != ICE_SUCCESS) 7374 device_printf(dev, 7375 "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", 7376 __func__, ice_status_str(status), 7377 ice_aq_str(hw->adminq.sq_last_status)); 7378 } 7379 } 7380 7381 /** 7382 * ice_apply_saved_phy_req_to_cfg -- Write saved user PHY settings to cfg data 7383 * @pi: port info struct 7384 * @pcaps: TOPO_CAPS capability data to use for defaults 7385 * @cfg: new PHY config data to be modified 7386 * 7387 * Applies user settings for advertised speeds to the PHY type fields in the 7388 * supplied PHY config struct. It uses the data from pcaps to check if the 7389 * saved settings are invalid and uses the pcaps data instead if they are 7390 * invalid. 7391 */ 7392 static void 7393 ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi, 7394 struct ice_aqc_get_phy_caps_data *pcaps, 7395 struct ice_aqc_set_phy_cfg_data *cfg) 7396 { 7397 u64 phy_low = 0, phy_high = 0; 7398 7399 ice_update_phy_type(&phy_low, &phy_high, pi->phy.curr_user_speed_req); 7400 cfg->phy_type_low = pcaps->phy_type_low & htole64(phy_low); 7401 cfg->phy_type_high = pcaps->phy_type_high & htole64(phy_high); 7402 7403 /* Can't use saved user speed request; use NVM default PHY capabilities */ 7404 if (!cfg->phy_type_low && !cfg->phy_type_high) { 7405 cfg->phy_type_low = pcaps->phy_type_low; 7406 cfg->phy_type_high = pcaps->phy_type_high; 7407 } 7408 } 7409 7410 /** 7411 * ice_apply_saved_fec_req_to_cfg -- Write saved user FEC mode to cfg data 7412 * @pi: port info struct 7413 * @pcaps: TOPO_CAPS capability data to use for defaults 7414 * @cfg: new PHY config data to be modified 7415 * 7416 * Applies user setting for FEC mode to PHY config struct. It uses the data 7417 * from pcaps to check if the saved settings are invalid and uses the pcaps 7418 * data instead if they are invalid. 7419 */ 7420 static void 7421 ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi, 7422 struct ice_aqc_get_phy_caps_data *pcaps, 7423 struct ice_aqc_set_phy_cfg_data *cfg) 7424 { 7425 ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); 7426 7427 /* Can't use saved user FEC mode; use NVM default PHY capabilities */ 7428 if (cfg->link_fec_opt && 7429 !(cfg->link_fec_opt & pcaps->link_fec_options)) { 7430 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 7431 cfg->link_fec_opt = pcaps->link_fec_options; 7432 } 7433 } 7434 7435 /** 7436 * ice_apply_saved_fc_req_to_cfg -- Write saved user flow control mode to cfg data 7437 * @pi: port info struct 7438 * @cfg: new PHY config data to be modified 7439 * 7440 * Applies user setting for flow control mode to PHY config struct. There are 7441 * no invalid flow control mode settings; if there are, then this function 7442 * treats them like "ICE_FC_NONE". 
7443 */ 7444 static void 7445 ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, 7446 struct ice_aqc_set_phy_cfg_data *cfg) 7447 { 7448 cfg->caps &= ~(ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | 7449 ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY); 7450 7451 switch (pi->phy.curr_user_fc_req) { 7452 case ICE_FC_FULL: 7453 cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | 7454 ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; 7455 break; 7456 case ICE_FC_RX_PAUSE: 7457 cfg->caps |= ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; 7458 break; 7459 case ICE_FC_TX_PAUSE: 7460 cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY; 7461 break; 7462 default: 7463 /* ICE_FC_NONE */ 7464 break; 7465 } 7466 } 7467 7468 /** 7469 * ice_apply_saved_user_req_to_cfg -- Apply all saved user settings to AQ cfg data 7470 * @pi: port info struct 7471 * @pcaps: TOPO_CAPS capability data to use for defaults 7472 * @cfg: new PHY config data to be modified 7473 * 7474 * Applies user settings for advertised speeds, FEC mode, and flow control 7475 * mode to the supplied PHY config struct; it uses the data from pcaps to check 7476 * if the saved settings are invalid and uses the pcaps data instead if they 7477 * are invalid. 7478 */ 7479 static void 7480 ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi, 7481 struct ice_aqc_get_phy_caps_data *pcaps, 7482 struct ice_aqc_set_phy_cfg_data *cfg) 7483 { 7484 ice_apply_saved_phy_req_to_cfg(pi, pcaps, cfg); 7485 ice_apply_saved_fec_req_to_cfg(pi, pcaps, cfg); 7486 ice_apply_saved_fc_req_to_cfg(pi, cfg); 7487 } 7488 7489 /** 7490 * ice_apply_saved_phy_cfg -- Re-apply user PHY config settings 7491 * @sc: device private structure 7492 * 7493 * Takes the saved user PHY config settings, overwrites the NVM 7494 * default with them if they're valid, and uses the Set PHY Config AQ command 7495 * to apply them. 7496 * 7497 * Intended for use when media is inserted. 
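* For example, ice_init_link_configuration() above invokes this once the port reports ICE_AQ_MEDIA_AVAILABLE, so that the user's advertised speed, FEC, and flow control requests are re-applied to the newly inserted media.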
7498 * 7499 * @pre Port has media available 7500 */ 7501 void 7502 ice_apply_saved_phy_cfg(struct ice_softc *sc) 7503 { 7504 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 7505 struct ice_port_info *pi = sc->hw.port_info; 7506 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 7507 struct ice_hw *hw = &sc->hw; 7508 device_t dev = sc->dev; 7509 enum ice_status status; 7510 7511 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 7512 &pcaps, NULL); 7513 if (status != ICE_SUCCESS) { 7514 device_printf(dev, 7515 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 7516 __func__, ice_status_str(status), 7517 ice_aq_str(hw->adminq.sq_last_status)); 7518 return; 7519 } 7520 7521 /* Setup new PHY config */ 7522 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 7523 7524 /* Apply settings requested by user */ 7525 ice_apply_saved_user_req_to_cfg(pi, &pcaps, &cfg); 7526 7527 /* Enable link and re-negotiate it */ 7528 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 7529 7530 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 7531 if (status != ICE_SUCCESS) { 7532 if ((status == ICE_ERR_AQ_ERROR) && 7533 (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)) 7534 device_printf(dev, 7535 "%s: User PHY cfg not applied; no media in port\n", 7536 __func__); 7537 else 7538 device_printf(dev, 7539 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 7540 __func__, ice_status_str(status), 7541 ice_aq_str(hw->adminq.sq_last_status)); 7542 } 7543 } 7544 7545 /** 7546 * ice_print_ldo_tlv - Print out LDO TLV information 7547 * @sc: device private structure 7548 * @tlv: LDO TLV information from the adapter NVM 7549 * 7550 * Dump out the information in tlv to the kernel message buffer; intended for 7551 * debugging purposes. 7552 */ 7553 static void 7554 ice_print_ldo_tlv(struct ice_softc *sc, struct ice_link_default_override_tlv *tlv) 7555 { 7556 device_t dev = sc->dev; 7557 7558 device_printf(dev, "TLV: -options 0x%02x\n", tlv->options); 7559 device_printf(dev, " -phy_config 0x%02x\n", tlv->phy_config); 7560 device_printf(dev, " -fec_options 0x%02x\n", tlv->fec_options); 7561 device_printf(dev, " -phy_high 0x%016llx\n", 7562 (unsigned long long)tlv->phy_type_high); 7563 device_printf(dev, " -phy_low 0x%016llx\n", 7564 (unsigned long long)tlv->phy_type_low); 7565 } 7566 7567 /** 7568 * ice_set_link_management_mode -- Strict or lenient link management 7569 * @sc: device private structure 7570 * 7571 * Some NVMs give the adapter the option to advertise a superset of link 7572 * configurations. This checks to see if that option is enabled. 7573 * Further, the NVM could also provide a specific set of configurations 7574 * to try; these are cached in the driver's private structure if they 7575 * are available. 7576 */ 7577 void 7578 ice_set_link_management_mode(struct ice_softc *sc) 7579 { 7580 struct ice_port_info *pi = sc->hw.port_info; 7581 device_t dev = sc->dev; 7582 struct ice_link_default_override_tlv tlv = { 0 }; 7583 enum ice_status status; 7584 7585 /* Port must be in strict mode if FW version is below a certain 7586 * version. (i.e. 
Don't set lenient mode features) 7587 */ 7588 if (!(ice_fw_supports_link_override(&sc->hw))) 7589 return; 7590 7591 status = ice_get_link_default_override(&tlv, pi); 7592 if (status != ICE_SUCCESS) { 7593 device_printf(dev, 7594 "%s: ice_get_link_default_override failed; status %s, aq_err %s\n", 7595 __func__, ice_status_str(status), 7596 ice_aq_str(sc->hw.adminq.sq_last_status)); 7597 return; 7598 } 7599 7600 if (sc->hw.debug_mask & ICE_DBG_LINK) 7601 ice_print_ldo_tlv(sc, &tlv); 7602 7603 /* Set lenient link mode */ 7604 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LENIENT_LINK_MODE) && 7605 (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE))) 7606 ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_en); 7607 7608 /* Default overrides only work if in lenient link mode */ 7609 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DEFAULT_OVERRIDE) && 7610 ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE) && 7611 (tlv.options & ICE_LINK_OVERRIDE_EN)) 7612 ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_en); 7613 7614 /* Cache the LDO TLV structure in the driver, since it won't change 7615 * during the driver's lifetime. 7616 */ 7617 sc->ldo_tlv = tlv; 7618 } 7619 7620 /** 7621 * ice_init_saved_phy_cfg -- Set cached user PHY cfg settings with NVM defaults 7622 * @sc: device private structure 7623 * 7624 * This should be called before the tunables for these link settings 7625 * (e.g. advertise_speed) are added -- so that these defaults don't overwrite 7626 * the cached values that the sysctl handlers will write. 7627 * 7628 * This also needs to be called before ice_init_link_configuration, to ensure 7629 * that there are sane values that can be written if there is media available 7630 * in the port. 7631 */ 7632 void 7633 ice_init_saved_phy_cfg(struct ice_softc *sc) 7634 { 7635 struct ice_port_info *pi = sc->hw.port_info; 7636 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 7637 struct ice_hw *hw = &sc->hw; 7638 device_t dev = sc->dev; 7639 enum ice_status status; 7640 u64 phy_low, phy_high; 7641 7642 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 7643 &pcaps, NULL); 7644 if (status != ICE_SUCCESS) { 7645 device_printf(dev, 7646 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 7647 __func__, ice_status_str(status), 7648 ice_aq_str(hw->adminq.sq_last_status)); 7649 return; 7650 } 7651 7652 phy_low = le64toh(pcaps.phy_type_low); 7653 phy_high = le64toh(pcaps.phy_type_high); 7654 7655 /* Save off initial config parameters */ 7656 pi->phy.curr_user_speed_req = 7657 ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); 7658 pi->phy.curr_user_fec_req = ice_caps_to_fec_mode(pcaps.caps, 7659 pcaps.link_fec_options); 7660 pi->phy.curr_user_fc_req = ice_caps_to_fc_mode(pcaps.caps); 7661 } 7662 7663 /** 7664 * ice_module_init - Driver callback to handle module load 7665 * 7666 * Callback for handling module load events. This function should initialize 7667 * any data structures that are used for the life of the device driver. 7668 */ 7669 static int 7670 ice_module_init(void) 7671 { 7672 return (0); 7673 } 7674 7675 /** 7676 * ice_module_exit - Driver callback to handle module exit 7677 * 7678 * Callback for handling module unload events. This function should release 7679 * any resources initialized during ice_module_init. 7680 * 7681 * If this function returns non-zero, the module will not be unloaded. It 7682 * should only return such a value if the module cannot be unloaded at all, 7683 * such as due to outstanding memory references that cannot be revoked. 
7684 */ 7685 static int 7686 ice_module_exit(void) 7687 { 7688 return (0); 7689 } 7690 7691 /** 7692 * ice_module_event_handler - Callback for module events 7693 * @mod: unused module_t parameter 7694 * @what: the event requested 7695 * @arg: unused event argument 7696 * 7697 * Callback used to handle module events from the stack. Used to allow the 7698 * driver to define custom behavior that should happen at module load and 7699 * unload. 7700 */ 7701 int 7702 ice_module_event_handler(module_t __unused mod, int what, void __unused *arg) 7703 { 7704 switch (what) { 7705 case MOD_LOAD: 7706 return ice_module_init(); 7707 case MOD_UNLOAD: 7708 return ice_module_exit(); 7709 default: 7710 /* TODO: do we need to handle MOD_QUIESCE and MOD_SHUTDOWN? */ 7711 return (EOPNOTSUPP); 7712 } 7713 } 7714 7715 /** 7716 * ice_handle_nvm_access_ioctl - Handle an NVM access ioctl request 7717 * @sc: the device private softc 7718 * @ifd: ifdrv ioctl request pointer 7719 */ 7720 int 7721 ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd) 7722 { 7723 union ice_nvm_access_data *data; 7724 struct ice_nvm_access_cmd *cmd; 7725 size_t ifd_len = ifd->ifd_len, malloc_len; 7726 struct ice_hw *hw = &sc->hw; 7727 device_t dev = sc->dev; 7728 enum ice_status status; 7729 u8 *nvm_buffer; 7730 int err; 7731 7732 /* 7733 * ifioctl forwards SIOCxDRVSPEC to iflib without performing 7734 * a privilege check. In turn, iflib forwards the ioctl to the driver 7735 * without performing a privilege check. Perform one here to ensure 7736 * that non-privileged threads cannot access this interface. 7737 */ 7738 err = priv_check(curthread, PRIV_DRIVER); 7739 if (err) 7740 return (err); 7741 7742 if (ifd_len < sizeof(struct ice_nvm_access_cmd)) { 7743 device_printf(dev, "%s: ifdrv length is too small. Got %zu, but expected %zu\n", 7744 __func__, ifd_len, sizeof(struct ice_nvm_access_cmd)); 7745 return (EINVAL); 7746 } 7747 7748 if (ifd->ifd_data == NULL) { 7749 device_printf(dev, "%s: ifd data buffer not present.\n", 7750 __func__); 7751 return (EINVAL); 7752 } 7753 7754 /* 7755 * If everything works correctly, ice_handle_nvm_access should not 7756 * modify data past the size of the ioctl length. However, it could 7757 * lead to memory corruption if it did. Make sure to allocate at least 7758 * enough space for the command and data regardless. This 7759 * ensures that any access to the data union will not access invalid 7760 * memory. 7761 */ 7762 malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd)); 7763 7764 nvm_buffer = (u8 *)malloc(malloc_len, M_ICE, M_ZERO | M_WAITOK); 7765 if (!nvm_buffer) 7766 return (ENOMEM); 7767 7768 /* Copy the NVM access command and data in from user space */ 7769 /* coverity[tainted_data_argument] */ 7770 err = copyin(ifd->ifd_data, nvm_buffer, ifd_len); 7771 if (err) { 7772 device_printf(dev, "%s: Copying request from user space failed, err %s\n", 7773 __func__, ice_err_str(err)); 7774 goto cleanup_free_nvm_buffer; 7775 } 7776 7777 /* 7778 * The NVM command structure is immediately followed by data which 7779 * varies in size based on the command. 
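* In other words, the user buffer holds a struct ice_nvm_access_cmd header followed by a union ice_nvm_access_data payload, which is why the data pointer below is offset by sizeof(struct ice_nvm_access_cmd).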
7780 */ 7781 cmd = (struct ice_nvm_access_cmd *)nvm_buffer; 7782 data = (union ice_nvm_access_data *)(nvm_buffer + sizeof(struct ice_nvm_access_cmd)); 7783 7784 /* Handle the NVM access request */ 7785 status = ice_handle_nvm_access(hw, cmd, data); 7786 if (status) 7787 ice_debug(hw, ICE_DBG_NVM, 7788 "NVM access request failed, err %s\n", 7789 ice_status_str(status)); 7790 7791 /* Copy the possibly modified contents of the handled request out */ 7792 err = copyout(nvm_buffer, ifd->ifd_data, ifd_len); 7793 if (err) { 7794 device_printf(dev, "%s: Copying response back to user space failed, err %s\n", 7795 __func__, ice_err_str(err)); 7796 goto cleanup_free_nvm_buffer; 7797 } 7798 7799 /* Convert private status to an error code for proper ioctl response */ 7800 switch (status) { 7801 case ICE_SUCCESS: 7802 err = (0); 7803 break; 7804 case ICE_ERR_NO_MEMORY: 7805 err = (ENOMEM); 7806 break; 7807 case ICE_ERR_OUT_OF_RANGE: 7808 err = (ENOTTY); 7809 break; 7810 case ICE_ERR_PARAM: 7811 default: 7812 err = (EINVAL); 7813 break; 7814 } 7815 7816 cleanup_free_nvm_buffer: 7817 free(nvm_buffer, M_ICE); 7818 return err; 7819 } 7820 7821 /** 7822 * ice_read_sff_eeprom - Read data from SFF eeprom 7823 * @sc: device softc 7824 * @dev_addr: I2C device address (typically 0xA0 or 0xA2) 7825 * @offset: offset into the eeprom 7826 * @data: pointer to data buffer to store read data in 7827 * @length: length to read; max length is 16 7828 * 7829 * Read from the SFF eeprom in the module for this PF's port. For more details 7830 * on the contents of an SFF eeprom, refer to SFF-8472 (SFP), SFF-8636 (QSFP), 7831 * and SFF-8024 (both). 7832 */ 7833 int 7834 ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length) 7835 { 7836 struct ice_hw *hw = &sc->hw; 7837 int error = 0, retries = 0; 7838 enum ice_status status; 7839 7840 if (length > 16) 7841 return (EINVAL); 7842 7843 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) 7844 return (ENOSYS); 7845 7846 if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) 7847 return (ENXIO); 7848 7849 do { 7850 status = ice_aq_sff_eeprom(hw, 0, dev_addr, 7851 offset, 0, 0, data, length, 7852 false, NULL); 7853 if (!status) { 7854 error = 0; 7855 break; 7856 } 7857 if (status == ICE_ERR_AQ_ERROR && 7858 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 7859 error = EBUSY; 7860 continue; 7861 } 7862 if (status == ICE_ERR_AQ_ERROR && 7863 hw->adminq.sq_last_status == ICE_AQ_RC_EACCES) { 7864 /* FW says I2C access isn't supported */ 7865 error = EACCES; 7866 break; 7867 } 7868 if (status == ICE_ERR_AQ_ERROR && 7869 hw->adminq.sq_last_status == ICE_AQ_RC_EPERM) { 7870 device_printf(sc->dev, 7871 "%s: Module pointer location specified in command does not permit the required operation.\n", 7872 __func__); 7873 error = EPERM; 7874 break; 7875 } else { 7876 device_printf(sc->dev, 7877 "%s: Error reading I2C data: err %s aq_err %s\n", 7878 __func__, ice_status_str(status), 7879 ice_aq_str(hw->adminq.sq_last_status)); 7880 error = EIO; 7881 break; 7882 } 7883 } while (retries++ < ICE_I2C_MAX_RETRIES); 7884 7885 if (error == EBUSY) 7886 device_printf(sc->dev, 7887 "%s: Error reading I2C data after %d retries\n", 7888 __func__, ICE_I2C_MAX_RETRIES); 7889 7890 return (error); 7891 } 7892 7893 /** 7894 * ice_handle_i2c_req - Driver independent I2C request handler 7895 * @sc: device softc 7896 * @req: The I2C parameters to use 7897 * 7898 * Read from the port's I2C eeprom using the parameters from the ioctl.
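* For example, a SIOCGI2C request with dev_addr 0xA0, offset 0, and len 1 returns the module identifier byte (0x03 for SFP, 0x0D or 0x11 for QSFP+/QSFP28), the same byte that ice_sysctl_read_i2c_diag_data() below uses to distinguish module types.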
7899 */ 7900 int 7901 ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req) 7902 { 7903 return ice_read_sff_eeprom(sc, req->dev_addr, req->offset, req->data, req->len); 7904 } 7905 7906 /** 7907 * ice_sysctl_read_i2c_diag_data - Read some module diagnostic data via i2c 7908 * @oidp: sysctl oid structure 7909 * @arg1: pointer to private data structure 7910 * @arg2: unused 7911 * @req: sysctl request pointer 7912 * 7913 * Read 8 bytes of diagnostic data from the SFF eeprom in the (Q)SFP module 7914 * inserted into the port. 7915 * 7916 * | SFP A2 | QSFP Lower Page 7917 * ------------|---------|---------------- 7918 * Temperature | 96-97 | 22-23 7919 * Vcc | 98-99 | 26-27 7920 * TX power | 102-103 | 34-35..40-41 7921 * RX power | 104-105 | 50-51..56-57 7922 */ 7923 static int 7924 ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 7925 { 7926 struct ice_softc *sc = (struct ice_softc *)arg1; 7927 device_t dev = sc->dev; 7928 struct sbuf *sbuf; 7929 int error = 0; 7930 u8 data[16]; 7931 7932 UNREFERENCED_PARAMETER(arg2); 7933 UNREFERENCED_PARAMETER(oidp); 7934 7935 if (ice_driver_is_detaching(sc)) 7936 return (ESHUTDOWN); 7937 7938 if (req->oldptr == NULL) { 7939 error = SYSCTL_OUT(req, 0, 128); 7940 return (error); 7941 } 7942 7943 error = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1); 7944 if (error) 7945 return (error); 7946 7947 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 7948 if (data[0] == 0x3) { 7949 /* 7950 * Check for: 7951 * - Internally calibrated data 7952 * - Diagnostic monitoring is implemented 7953 */ 7954 ice_read_sff_eeprom(sc, 0xA0, 92, data, 1); 7955 if (!(data[0] & 0x60)) { 7956 device_printf(dev, "Module doesn't support diagnostics: 0xA0[92] = %02X\n", data[0]); 7957 return (ENODEV); 7958 } 7959 7960 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7961 7962 ice_read_sff_eeprom(sc, 0xA2, 96, data, 4); 7963 for (int i = 0; i < 4; i++) 7964 sbuf_printf(sbuf, "%02X ", data[i]); 7965 7966 ice_read_sff_eeprom(sc, 0xA2, 102, data, 4); 7967 for (int i = 0; i < 4; i++) 7968 sbuf_printf(sbuf, "%02X ", data[i]); 7969 } else if (data[0] == 0xD || data[0] == 0x11) { 7970 /* 7971 * QSFP+ modules are always internally calibrated, and must indicate 7972 * what types of diagnostic monitoring are implemented 7973 */ 7974 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7975 7976 ice_read_sff_eeprom(sc, 0xA0, 22, data, 2); 7977 for (int i = 0; i < 2; i++) 7978 sbuf_printf(sbuf, "%02X ", data[i]); 7979 7980 ice_read_sff_eeprom(sc, 0xA0, 26, data, 2); 7981 for (int i = 0; i < 2; i++) 7982 sbuf_printf(sbuf, "%02X ", data[i]); 7983 7984 ice_read_sff_eeprom(sc, 0xA0, 34, data, 2); 7985 for (int i = 0; i < 2; i++) 7986 sbuf_printf(sbuf, "%02X ", data[i]); 7987 7988 ice_read_sff_eeprom(sc, 0xA0, 50, data, 2); 7989 for (int i = 0; i < 2; i++) 7990 sbuf_printf(sbuf, "%02X ", data[i]); 7991 } else { 7992 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", data[0]); 7993 return (ENODEV); 7994 } 7995 7996 sbuf_finish(sbuf); 7997 sbuf_delete(sbuf); 7998 7999 return (0); 8000 } 8001 8002 /** 8003 * ice_alloc_intr_tracking - Setup interrupt tracking structures 8004 * @sc: device softc structure 8005 * 8006 * Sets up the resource manager for keeping track of interrupt allocations, 8007 * and initializes the tracking maps for the PF's interrupt allocations. 8008 * 8009 * Unlike the scheme for queues, this is done in one step since both the 8010 * manager and the maps both have the same lifetime. 8011 * 8012 * @returns 0 on success, or an error code on failure. 
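* The resource manager is initialized in contiguous-only mode via ice_resmgr_init_contig_only(), and each entry of the PF interrupt map starts out as ICE_INVALID_RES_IDX until a vector is assigned.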
8013 */ 8014 int 8015 ice_alloc_intr_tracking(struct ice_softc *sc) 8016 { 8017 struct ice_hw *hw = &sc->hw; 8018 device_t dev = sc->dev; 8019 int err; 8020 8021 /* Initialize the interrupt allocation manager */ 8022 err = ice_resmgr_init_contig_only(&sc->imgr, 8023 hw->func_caps.common_cap.num_msix_vectors); 8024 if (err) { 8025 device_printf(dev, "Unable to initialize PF interrupt manager: %s\n", 8026 ice_err_str(err)); 8027 return (err); 8028 } 8029 8030 /* Allocate PF interrupt mapping storage */ 8031 if (!(sc->pf_imap = 8032 (u16 *)malloc(sizeof(u16) * hw->func_caps.common_cap.num_msix_vectors, 8033 M_ICE, M_NOWAIT))) { 8034 device_printf(dev, "Unable to allocate PF imap memory\n"); 8035 err = ENOMEM; 8036 goto free_imgr; 8037 } 8038 for (u32 i = 0; i < hw->func_caps.common_cap.num_msix_vectors; i++) { 8039 sc->pf_imap[i] = ICE_INVALID_RES_IDX; 8040 } 8041 8042 return (0); 8043 8044 free_imgr: 8045 ice_resmgr_destroy(&sc->imgr); 8046 return (err); 8047 } 8048 8049 /** 8050 * ice_free_intr_tracking - Free PF interrupt tracking structures 8051 * @sc: device softc structure 8052 * 8053 * Frees the interrupt resource allocation manager and the PF's owned maps. 8054 * 8055 * VF maps are released when the owning VF's are destroyed, which should always 8056 * happen before this function is called. 8057 */ 8058 void 8059 ice_free_intr_tracking(struct ice_softc *sc) 8060 { 8061 if (sc->pf_imap) { 8062 ice_resmgr_release_map(&sc->imgr, sc->pf_imap, 8063 sc->lan_vectors); 8064 free(sc->pf_imap, M_ICE); 8065 sc->pf_imap = NULL; 8066 } 8067 8068 ice_resmgr_destroy(&sc->imgr); 8069 } 8070 8071 /** 8072 * ice_apply_supported_speed_filter - Mask off unsupported speeds 8073 * @phy_type_low: bit-field for the low quad word of PHY types 8074 * @phy_type_high: bit-field for the high quad word of PHY types 8075 * 8076 * Given the two quad words containing the supported PHY types, 8077 * this function will mask off the speeds that are not currently 8078 * supported by the device. 8079 */ 8080 static void 8081 ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high) 8082 { 8083 u64 phylow_mask; 8084 8085 /* We won't offer anything lower than 1G for any part, 8086 * but we also won't offer anything under 25G for 100G 8087 * parts. 8088 */ 8089 phylow_mask = ~(ICE_PHY_TYPE_LOW_1000BASE_T - 1); 8090 if (*phy_type_high || 8091 *phy_type_low & ~(ICE_PHY_TYPE_LOW_100GBASE_CR4 - 1)) 8092 phylow_mask = ~(ICE_PHY_TYPE_LOW_25GBASE_T - 1); 8093 *phy_type_low &= phylow_mask; 8094 } 8095 8096 /** 8097 * ice_get_phy_types - Report appropriate PHY types 8098 * @sc: device softc structure 8099 * @phy_type_low: bit-field for the low quad word of PHY types 8100 * @phy_type_high: bit-field for the high quad word of PHY types 8101 * 8102 * Populate the two quad words with bits representing the PHY types 8103 * supported by the device. This is really just a wrapper around 8104 * the ice_aq_get_phy_caps() that chooses the appropriate report 8105 * mode (lenient or strict) and reports back only the relevant PHY 8106 * types. In lenient mode the capabilities are retrieved with the 8107 * NVM_CAP report mode, otherwise they're retrieved using the 8108 * TOPO_CAP report mode (NVM intersected with current media). 8109 * 8110 * @returns 0 on success, or an error code on failure. 
8111 */ 8112 static enum ice_status 8113 ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high) 8114 { 8115 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 8116 struct ice_port_info *pi = sc->hw.port_info; 8117 device_t dev = sc->dev; 8118 enum ice_status status; 8119 u8 report_mode; 8120 8121 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) 8122 report_mode = ICE_AQC_REPORT_NVM_CAP; 8123 else 8124 report_mode = ICE_AQC_REPORT_TOPO_CAP; 8125 status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL); 8126 if (status != ICE_SUCCESS) { 8127 device_printf(dev, 8128 "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", 8129 __func__, (report_mode) ? "TOPO_CAP" : "NVM_CAP", 8130 ice_status_str(status), 8131 ice_aq_str(sc->hw.adminq.sq_last_status)); 8132 return (status); 8133 } 8134 8135 *phy_type_low = le64toh(pcaps.phy_type_low); 8136 *phy_type_high = le64toh(pcaps.phy_type_high); 8137 8138 return (ICE_SUCCESS); 8139 } 8140 8141 /** 8142 * ice_set_default_local_lldp_mib - Set Local LLDP MIB to default settings 8143 * @sc: device softc structure 8144 * 8145 * This function needs to be called after link up; it makes sure the FW 8146 * has certain PFC/DCB settings. This is intended to workaround a FW behavior 8147 * where these settings seem to be cleared on link up. 8148 */ 8149 void 8150 ice_set_default_local_lldp_mib(struct ice_softc *sc) 8151 { 8152 struct ice_dcbx_cfg *dcbcfg; 8153 struct ice_hw *hw = &sc->hw; 8154 struct ice_port_info *pi; 8155 device_t dev = sc->dev; 8156 enum ice_status status; 8157 8158 pi = hw->port_info; 8159 dcbcfg = &pi->qos_cfg.local_dcbx_cfg; 8160 8161 /* This value is only 3 bits; 8 TCs maps to 0 */ 8162 u8 maxtcs = hw->func_caps.common_cap.maxtc & ICE_IEEE_ETS_MAXTC_M; 8163 8164 /** 8165 * Setup the default settings used by the driver for the Set Local 8166 * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no 8167 * PFC). 8168 */ 8169 memset(dcbcfg, 0, sizeof(*dcbcfg)); 8170 dcbcfg->etscfg.willing = 1; 8171 dcbcfg->etscfg.tcbwtable[0] = 100; 8172 dcbcfg->etscfg.maxtcs = maxtcs; 8173 dcbcfg->etsrec.willing = 1; 8174 dcbcfg->etsrec.tcbwtable[0] = 100; 8175 dcbcfg->etsrec.maxtcs = maxtcs; 8176 dcbcfg->pfc.willing = 1; 8177 dcbcfg->pfc.pfccap = maxtcs; 8178 8179 status = ice_set_dcb_cfg(pi); 8180 8181 if (status) 8182 device_printf(dev, 8183 "Error setting Local LLDP MIB: %s aq_err %s\n", 8184 ice_status_str(status), 8185 ice_aq_str(hw->adminq.sq_last_status)); 8186 } 8187