1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2020, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /*$FreeBSD$*/ 32 33 /** 34 * @file ice_lib.c 35 * @brief Generic device setup and sysctl functions 36 * 37 * Library of generic device functions not specific to the networking stack. 38 * 39 * This includes hardware initialization functions, as well as handlers for 40 * many of the device sysctls used to probe driver status or tune specific 41 * behaviors. 42 */ 43 44 #include "ice_lib.h" 45 #include "ice_iflib.h" 46 #include <dev/pci/pcivar.h> 47 #include <dev/pci/pcireg.h> 48 #include <machine/resource.h> 49 #include <net/if_dl.h> 50 #include <sys/firmware.h> 51 #include <sys/priv.h> 52 53 /** 54 * @var M_ICE 55 * @brief main ice driver allocation type 56 * 57 * malloc(9) allocation type used by the majority of memory allocations in the 58 * ice driver. 
59 */ 60 MALLOC_DEFINE(M_ICE, "ice", "Intel(R) 100Gb Network Driver lib allocations"); 61 62 /* 63 * Helper function prototypes 64 */ 65 static int ice_get_next_vsi(struct ice_vsi **all_vsi, int size); 66 static void ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx); 67 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type); 68 static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx); 69 static int ice_setup_tx_ctx(struct ice_tx_queue *txq, 70 struct ice_tlan_ctx *tlan_ctx, u16 pf_q); 71 static int ice_setup_rx_ctx(struct ice_rx_queue *rxq); 72 static int ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg); 73 static void ice_free_fltr_list(struct ice_list_head *list); 74 static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 75 const u8 *addr, enum ice_sw_fwd_act_type action); 76 static void ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, 77 struct ice_ctl_q_info *cq); 78 static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_info *e); 79 static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, 80 struct ice_rq_event_info *event); 81 static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf); 82 static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); 83 static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); 84 static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info); 85 static u_int ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, u_int errors); 86 static void ice_add_debug_tunables(struct ice_softc *sc); 87 static void ice_add_debug_sysctls(struct ice_softc *sc); 88 static void ice_vsi_set_rss_params(struct ice_vsi *vsi); 89 static void ice_get_default_rss_key(u8 *seed); 90 static int ice_set_rss_key(struct ice_vsi *vsi); 91 static int ice_set_rss_lut(struct ice_vsi *vsi); 92 static void ice_set_rss_flow_flds(struct ice_vsi *vsi); 93 static void ice_clean_vsi_rss_cfg(struct ice_vsi *vsi); 94 static const char *ice_aq_speed_to_str(struct ice_port_info *pi); 95 static const char *ice_requested_fec_mode(struct ice_port_info *pi); 96 static const char *ice_negotiated_fec_mode(struct ice_port_info *pi); 97 static const char *ice_autoneg_mode(struct ice_port_info *pi); 98 static const char *ice_flowcontrol_mode(struct ice_port_info *pi); 99 static void ice_print_bus_link_data(device_t dev, struct ice_hw *hw); 100 static void ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status); 101 static uint8_t ice_pcie_bandwidth_check(struct ice_softc *sc); 102 static uint64_t ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed); 103 static int ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width); 104 static uint64_t ice_phy_types_to_max_rate(struct ice_port_info *pi); 105 static void ice_add_sysctls_sw_stats(struct ice_vsi *vsi, 106 struct sysctl_ctx_list *ctx, 107 struct sysctl_oid *parent); 108 static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi, 109 enum ice_vsi_type type, int idx, 110 bool dynamic); 111 static void ice_handle_mib_change_event(struct ice_softc *sc, 112 struct ice_rq_event_info *event); 113 static void 114 ice_handle_lan_overflow_event(struct ice_softc *sc, 115 struct ice_rq_event_info *event); 116 static int ice_add_ethertype_to_list(struct ice_vsi *vsi, 117 struct ice_list_head *list, 118 u16 ethertype, u16 direction, 119 enum ice_sw_fwd_act_type action); 120 static void ice_add_rx_lldp_filter(struct ice_softc *sc); 121 static 
void ice_del_rx_lldp_filter(struct ice_softc *sc); 122 static u16 ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, 123 u64 phy_type_high); 124 static void 125 ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi, 126 struct ice_aqc_get_phy_caps_data *pcaps, 127 struct ice_aqc_set_phy_cfg_data *cfg); 128 static void 129 ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi, 130 struct ice_aqc_get_phy_caps_data *pcaps, 131 struct ice_aqc_set_phy_cfg_data *cfg); 132 static void 133 ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi, 134 struct ice_aqc_get_phy_caps_data *pcaps, 135 struct ice_aqc_set_phy_cfg_data *cfg); 136 static void 137 ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, 138 struct ice_aqc_set_phy_cfg_data *cfg); 139 static void 140 ice_print_ldo_tlv(struct ice_softc *sc, 141 struct ice_link_default_override_tlv *tlv); 142 static void 143 ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, 144 u64 *phy_type_high); 145 static int 146 ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds, 147 u64 *phy_type_low, u64 *phy_type_high); 148 static int 149 ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low, 150 u64 *phy_type_high); 151 static void 152 ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high); 153 static enum ice_status 154 ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high); 155 156 static int ice_module_init(void); 157 static int ice_module_exit(void); 158 159 /* 160 * package version comparison functions 161 */ 162 static bool pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name); 163 static int pkg_ver_compatible(struct ice_pkg_ver *pkg_ver); 164 165 /* 166 * dynamic sysctl handlers 167 */ 168 static int ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS); 169 static int ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS); 170 static int ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS); 171 static int ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS); 172 static int ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS); 173 static int ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS); 174 static int ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS); 175 static int ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS); 176 static int ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS); 177 static int ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS); 178 static int ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS); 179 static int ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS); 180 static int ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS); 181 static int ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS); 182 static int ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS); 183 static int ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS); 184 static int __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, 185 bool is_phy_type_high); 186 static int ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS); 187 static int ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS); 188 static int ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS); 189 static int ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS); 190 static int ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS); 191 static int ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS); 192 static int ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode); 193 static int ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS); 194 static int ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS); 195 static int ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS); 196 static int 
ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS); 197 static int ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); 198 static int ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS); 199 static int ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS); 200 static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS); 201 202 /** 203 * ice_map_bar - Map PCIe BAR memory 204 * @dev: the PCIe device 205 * @bar: the BAR info structure 206 * @bar_num: PCIe BAR number 207 * 208 * Maps the specified PCIe BAR. Stores the mapping data in struct 209 * ice_bar_info. 210 */ 211 int 212 ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num) 213 { 214 if (bar->res != NULL) { 215 device_printf(dev, "PCI BAR%d already mapped\n", bar_num); 216 return (EDOOFUS); 217 } 218 219 bar->rid = PCIR_BAR(bar_num); 220 bar->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar->rid, 221 RF_ACTIVE); 222 if (!bar->res) { 223 device_printf(dev, "PCI BAR%d mapping failed\n", bar_num); 224 return (ENXIO); 225 } 226 227 bar->tag = rman_get_bustag(bar->res); 228 bar->handle = rman_get_bushandle(bar->res); 229 bar->size = rman_get_size(bar->res); 230 231 return (0); 232 } 233 234 /** 235 * ice_free_bar - Free PCIe BAR memory 236 * @dev: the PCIe device 237 * @bar: the BAR info structure 238 * 239 * Frees the specified PCIe BAR, releasing its resources. 240 */ 241 void 242 ice_free_bar(device_t dev, struct ice_bar_info *bar) 243 { 244 if (bar->res != NULL) 245 bus_release_resource(dev, SYS_RES_MEMORY, bar->rid, bar->res); 246 bar->res = NULL; 247 } 248 249 /** 250 * ice_set_ctrlq_len - Configure ctrlq lengths for a device 251 * @hw: the device hardware structure 252 * 253 * Configures the control queues for the given device, setting up the 254 * specified lengths, prior to initializing hardware. 255 */ 256 void 257 ice_set_ctrlq_len(struct ice_hw *hw) 258 { 259 hw->adminq.num_rq_entries = ICE_AQ_LEN; 260 hw->adminq.num_sq_entries = ICE_AQ_LEN; 261 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; 262 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; 263 264 hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; 265 hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; 266 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 267 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 268 269 } 270 271 /** 272 * ice_get_next_vsi - Get the next available VSI slot 273 * @all_vsi: the VSI list 274 * @size: the size of the VSI list 275 * 276 * Returns the index to the first available VSI slot. Will return size (one 277 * past the last index) if there are no slots available. 278 */ 279 static int 280 ice_get_next_vsi(struct ice_vsi **all_vsi, int size) 281 { 282 int i; 283 284 for (i = 0; i < size; i++) { 285 if (all_vsi[i] == NULL) 286 return i; 287 } 288 289 return size; 290 } 291 292 /** 293 * ice_setup_vsi_common - Common VSI setup for both dynamic and static VSIs 294 * @sc: the device private softc structure 295 * @vsi: the VSI to setup 296 * @type: the VSI type of the new VSI 297 * @idx: the index in the all_vsi array to use 298 * @dynamic: whether this VSI memory was dynamically allocated 299 * 300 * Perform setup for a VSI that is common to both dynamically allocated VSIs 301 * and the static PF VSI which is embedded in the softc structure. 
302 */ 303 static void 304 ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi, 305 enum ice_vsi_type type, int idx, bool dynamic) 306 { 307 /* Store important values in VSI struct */ 308 vsi->type = type; 309 vsi->sc = sc; 310 vsi->idx = idx; 311 sc->all_vsi[idx] = vsi; 312 vsi->dynamic = dynamic; 313 314 /* Setup the VSI tunables now */ 315 ice_add_vsi_tunables(vsi, sc->vsi_sysctls); 316 } 317 318 /** 319 * ice_alloc_vsi - Allocate a dynamic VSI 320 * @sc: device softc structure 321 * @type: VSI type 322 * 323 * Allocates a new dynamic VSI structure and inserts it into the VSI list. 324 */ 325 struct ice_vsi * 326 ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type) 327 { 328 struct ice_vsi *vsi; 329 int idx; 330 331 /* Find an open index for a new VSI to be allocated. If the returned 332 * index is >= the num_available_vsi then it means no slot is 333 * available. 334 */ 335 idx = ice_get_next_vsi(sc->all_vsi, sc->num_available_vsi); 336 if (idx >= sc->num_available_vsi) { 337 device_printf(sc->dev, "No available VSI slots\n"); 338 return NULL; 339 } 340 341 vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_WAITOK|M_ZERO); 342 if (!vsi) { 343 device_printf(sc->dev, "Unable to allocate VSI memory\n"); 344 return NULL; 345 } 346 347 ice_setup_vsi_common(sc, vsi, type, idx, true); 348 349 return vsi; 350 } 351 352 /** 353 * ice_setup_pf_vsi - Setup the PF VSI 354 * @sc: the device private softc 355 * 356 * Setup the PF VSI structure which is embedded as sc->pf_vsi in the device 357 * private softc. Unlike other VSIs, the PF VSI memory is allocated as part of 358 * the softc memory, instead of being dynamically allocated at creation. 359 */ 360 void 361 ice_setup_pf_vsi(struct ice_softc *sc) 362 { 363 ice_setup_vsi_common(sc, &sc->pf_vsi, ICE_VSI_PF, 0, false); 364 } 365 366 /** 367 * ice_alloc_vsi_qmap 368 * @vsi: VSI structure 369 * @max_tx_queues: Number of transmit queues to identify 370 * @max_rx_queues: Number of receive queues to identify 371 * 372 * Allocates a max_[t|r]x_queues array of words for the VSI where each 373 * word contains the index of the queue it represents. In here, all 374 * words are initialized to an index of ICE_INVALID_RES_IDX, indicating 375 * all queues for this VSI are not yet assigned an index and thus, 376 * not ready for use. 377 * 378 * Returns an error code on failure. 
379 */ 380 int 381 ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues, 382 const int max_rx_queues) 383 { 384 struct ice_softc *sc = vsi->sc; 385 int i; 386 387 MPASS(max_tx_queues > 0); 388 MPASS(max_rx_queues > 0); 389 390 /* Allocate Tx queue mapping memory */ 391 if (!(vsi->tx_qmap = 392 (u16 *) malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK))) { 393 device_printf(sc->dev, "Unable to allocate Tx qmap memory\n"); 394 return (ENOMEM); 395 } 396 397 /* Allocate Rx queue mapping memory */ 398 if (!(vsi->rx_qmap = 399 (u16 *) malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK))) { 400 device_printf(sc->dev, "Unable to allocate Rx qmap memory\n"); 401 goto free_tx_qmap; 402 } 403 404 /* Mark every queue map as invalid to start with */ 405 for (i = 0; i < max_tx_queues; i++) { 406 vsi->tx_qmap[i] = ICE_INVALID_RES_IDX; 407 } 408 for (i = 0; i < max_rx_queues; i++) { 409 vsi->rx_qmap[i] = ICE_INVALID_RES_IDX; 410 } 411 412 return 0; 413 414 free_tx_qmap: 415 free(vsi->tx_qmap, M_ICE); 416 vsi->tx_qmap = NULL; 417 418 return (ENOMEM); 419 } 420 421 /** 422 * ice_free_vsi_qmaps - Free the PF qmaps associated with a VSI 423 * @vsi: the VSI private structure 424 * 425 * Frees the PF qmaps associated with the given VSI. Generally this will be 426 * called by ice_release_vsi, but may need to be called during attach cleanup, 427 * depending on when the qmaps were allocated. 428 */ 429 void 430 ice_free_vsi_qmaps(struct ice_vsi *vsi) 431 { 432 struct ice_softc *sc = vsi->sc; 433 434 if (vsi->tx_qmap) { 435 ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, 436 vsi->num_tx_queues); 437 free(vsi->tx_qmap, M_ICE); 438 vsi->tx_qmap = NULL; 439 } 440 441 if (vsi->rx_qmap) { 442 ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap, 443 vsi->num_rx_queues); 444 free(vsi->rx_qmap, M_ICE); 445 vsi->rx_qmap = NULL; 446 } 447 } 448 449 /** 450 * ice_set_default_vsi_ctx - Setup default VSI context parameters 451 * @ctx: the VSI context to initialize 452 * 453 * Initialize and prepare a default VSI context for configuring a new VSI. 
454 */ 455 static void 456 ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx) 457 { 458 u32 table = 0; 459 460 memset(&ctx->info, 0, sizeof(ctx->info)); 461 /* VSI will be allocated from shared pool */ 462 ctx->alloc_from_pool = true; 463 /* Enable source pruning by default */ 464 ctx->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; 465 /* Traffic from VSI can be sent to LAN */ 466 ctx->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 467 /* Allow all packets untagged/tagged */ 468 ctx->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & 469 ICE_AQ_VSI_VLAN_MODE_M) >> 470 ICE_AQ_VSI_VLAN_MODE_S); 471 /* Show VLAN/UP from packets in Rx descriptors */ 472 ctx->info.vlan_flags |= ((ICE_AQ_VSI_VLAN_EMOD_STR_BOTH & 473 ICE_AQ_VSI_VLAN_EMOD_M) >> 474 ICE_AQ_VSI_VLAN_EMOD_S); 475 /* Have 1:1 UP mapping for both ingress/egress tables */ 476 table |= ICE_UP_TABLE_TRANSLATE(0, 0); 477 table |= ICE_UP_TABLE_TRANSLATE(1, 1); 478 table |= ICE_UP_TABLE_TRANSLATE(2, 2); 479 table |= ICE_UP_TABLE_TRANSLATE(3, 3); 480 table |= ICE_UP_TABLE_TRANSLATE(4, 4); 481 table |= ICE_UP_TABLE_TRANSLATE(5, 5); 482 table |= ICE_UP_TABLE_TRANSLATE(6, 6); 483 table |= ICE_UP_TABLE_TRANSLATE(7, 7); 484 ctx->info.ingress_table = CPU_TO_LE32(table); 485 ctx->info.egress_table = CPU_TO_LE32(table); 486 /* Have 1:1 UP mapping for outer to inner UP table */ 487 ctx->info.outer_up_table = CPU_TO_LE32(table); 488 /* No Outer tag support, so outer_tag_flags remains zero */ 489 } 490 491 /** 492 * ice_set_rss_vsi_ctx - Setup VSI context parameters for RSS 493 * @ctx: the VSI context to configure 494 * @type: the VSI type 495 * 496 * Configures the VSI context for RSS, based on the VSI type. 497 */ 498 static void 499 ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type) 500 { 501 u8 lut_type, hash_type; 502 503 switch (type) { 504 case ICE_VSI_PF: 505 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; 506 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 507 break; 508 case ICE_VSI_VF: 509 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 510 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 511 break; 512 default: 513 /* Other VSI types do not support RSS */ 514 return; 515 } 516 517 ctx->info.q_opt_rss = (((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & 518 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | 519 ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & 520 ICE_AQ_VSI_Q_OPT_RSS_HASH_M)); 521 } 522 523 /** 524 * ice_setup_vsi_qmap - Setup the queue mapping for a VSI 525 * @vsi: the VSI to configure 526 * @ctx: the VSI context to configure 527 * 528 * Configures the context for the given VSI, setting up how the firmware 529 * should map the queues for this VSI. 
530 */ 531 static int 532 ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 533 { 534 int pow = 0; 535 u16 qmap; 536 537 MPASS(vsi->rx_qmap != NULL); 538 539 /* TODO: 540 * Handle multiple Traffic Classes 541 * Handle scattered queues (for VFs) 542 */ 543 if (vsi->qmap_type != ICE_RESMGR_ALLOC_CONTIGUOUS) 544 return (EOPNOTSUPP); 545 546 ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG); 547 548 ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]); 549 ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues); 550 551 552 /* Calculate the next power-of-2 of number of queues */ 553 if (vsi->num_rx_queues) 554 pow = flsl(vsi->num_rx_queues - 1); 555 556 /* Assign all the queues to traffic class zero */ 557 qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M; 558 ctx->info.tc_mapping[0] = CPU_TO_LE16(qmap); 559 560 return 0; 561 } 562 563 /** 564 * ice_initialize_vsi - Initialize a VSI for use 565 * @vsi: the vsi to initialize 566 * 567 * Initialize a VSI over the adminq and prepare it for operation. 568 */ 569 int 570 ice_initialize_vsi(struct ice_vsi *vsi) 571 { 572 struct ice_vsi_ctx ctx = { 0 }; 573 struct ice_hw *hw = &vsi->sc->hw; 574 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 575 enum ice_status status; 576 int err; 577 578 /* For now, we only have code supporting PF VSIs */ 579 switch (vsi->type) { 580 case ICE_VSI_PF: 581 ctx.flags = ICE_AQ_VSI_TYPE_PF; 582 break; 583 default: 584 return (ENODEV); 585 } 586 587 ice_set_default_vsi_ctx(&ctx); 588 ice_set_rss_vsi_ctx(&ctx, vsi->type); 589 590 /* XXX: VSIs of other types may need different port info? */ 591 ctx.info.sw_id = hw->port_info->sw_id; 592 593 /* Set some RSS parameters based on the VSI type */ 594 ice_vsi_set_rss_params(vsi); 595 596 /* Initialize the Rx queue mapping for this VSI */ 597 err = ice_setup_vsi_qmap(vsi, &ctx); 598 if (err) { 599 return err; 600 } 601 602 /* (Re-)add VSI to HW VSI handle list */ 603 status = ice_add_vsi(hw, vsi->idx, &ctx, NULL); 604 if (status != 0) { 605 device_printf(vsi->sc->dev, 606 "Add VSI AQ call failed, err %s aq_err %s\n", 607 ice_status_str(status), 608 ice_aq_str(hw->adminq.sq_last_status)); 609 return (EIO); 610 } 611 vsi->info = ctx.info; 612 613 /* TODO: DCB traffic class support? */ 614 max_txqs[0] = vsi->num_tx_queues; 615 616 status = ice_cfg_vsi_lan(hw->port_info, vsi->idx, 617 ICE_DFLT_TRAFFIC_CLASS, max_txqs); 618 if (status) { 619 device_printf(vsi->sc->dev, 620 "Failed VSI lan queue config, err %s aq_err %s\n", 621 ice_status_str(status), 622 ice_aq_str(hw->adminq.sq_last_status)); 623 ice_deinit_vsi(vsi); 624 return (ENODEV); 625 } 626 627 /* Reset VSI stats */ 628 ice_reset_vsi_stats(vsi); 629 630 return 0; 631 } 632 633 /** 634 * ice_deinit_vsi - Tell firmware to release resources for a VSI 635 * @vsi: the VSI to release 636 * 637 * Helper function which requests the firmware to release the hardware 638 * resources associated with a given VSI. 639 */ 640 void 641 ice_deinit_vsi(struct ice_vsi *vsi) 642 { 643 struct ice_vsi_ctx ctx = { 0 }; 644 struct ice_softc *sc = vsi->sc; 645 struct ice_hw *hw = &sc->hw; 646 enum ice_status status; 647 648 /* Assert that the VSI pointer matches in the list */ 649 MPASS(vsi == sc->all_vsi[vsi->idx]); 650 651 ctx.info = vsi->info; 652 653 status = ice_rm_vsi_lan_cfg(hw->port_info, vsi->idx); 654 if (status) { 655 /* 656 * This should only fail if the VSI handle is invalid, or if 657 * any of the nodes have leaf nodes which are still in use. 
658 */ 659 device_printf(sc->dev, 660 "Unable to remove scheduler nodes for VSI %d, err %s\n", 661 vsi->idx, ice_status_str(status)); 662 } 663 664 /* Tell firmware to release the VSI resources */ 665 status = ice_free_vsi(hw, vsi->idx, &ctx, false, NULL); 666 if (status != 0) { 667 device_printf(sc->dev, 668 "Free VSI %u AQ call failed, err %s aq_err %s\n", 669 vsi->idx, ice_status_str(status), 670 ice_aq_str(hw->adminq.sq_last_status)); 671 } 672 } 673 674 /** 675 * ice_release_vsi - Release resources associated with a VSI 676 * @vsi: the VSI to release 677 * 678 * Release software and firmware resources associated with a VSI. Release the 679 * queue managers associated with this VSI. Also free the VSI structure memory 680 * if the VSI was allocated dynamically using ice_alloc_vsi(). 681 */ 682 void 683 ice_release_vsi(struct ice_vsi *vsi) 684 { 685 struct ice_softc *sc = vsi->sc; 686 int idx = vsi->idx; 687 688 /* Assert that the VSI pointer matches in the list */ 689 MPASS(vsi == sc->all_vsi[idx]); 690 691 /* Cleanup RSS configuration */ 692 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) 693 ice_clean_vsi_rss_cfg(vsi); 694 695 ice_del_vsi_sysctl_ctx(vsi); 696 697 ice_deinit_vsi(vsi); 698 699 ice_free_vsi_qmaps(vsi); 700 701 if (vsi->dynamic) { 702 free(sc->all_vsi[idx], M_ICE); 703 } 704 705 sc->all_vsi[idx] = NULL; 706 } 707 708 /** 709 * ice_aq_speed_to_rate - Convert AdminQ speed enum to baudrate 710 * @pi: port info data 711 * 712 * Returns the baudrate value for the current link speed of a given port. 713 */ 714 uint64_t 715 ice_aq_speed_to_rate(struct ice_port_info *pi) 716 { 717 switch (pi->phy.link_info.link_speed) { 718 case ICE_AQ_LINK_SPEED_100GB: 719 return IF_Gbps(100); 720 case ICE_AQ_LINK_SPEED_50GB: 721 return IF_Gbps(50); 722 case ICE_AQ_LINK_SPEED_40GB: 723 return IF_Gbps(40); 724 case ICE_AQ_LINK_SPEED_25GB: 725 return IF_Gbps(25); 726 case ICE_AQ_LINK_SPEED_10GB: 727 return IF_Gbps(10); 728 case ICE_AQ_LINK_SPEED_5GB: 729 return IF_Gbps(5); 730 case ICE_AQ_LINK_SPEED_2500MB: 731 return IF_Mbps(2500); 732 case ICE_AQ_LINK_SPEED_1000MB: 733 return IF_Mbps(1000); 734 case ICE_AQ_LINK_SPEED_100MB: 735 return IF_Mbps(100); 736 case ICE_AQ_LINK_SPEED_10MB: 737 return IF_Mbps(10); 738 case ICE_AQ_LINK_SPEED_UNKNOWN: 739 default: 740 /* return 0 if we don't know the link speed */ 741 return 0; 742 } 743 } 744 745 /** 746 * ice_aq_speed_to_str - Convert AdminQ speed enum to string representation 747 * @pi: port info data 748 * 749 * Returns the string representation of the current link speed for a given 750 * port. 
751 */ 752 static const char * 753 ice_aq_speed_to_str(struct ice_port_info *pi) 754 { 755 switch (pi->phy.link_info.link_speed) { 756 case ICE_AQ_LINK_SPEED_100GB: 757 return "100 Gbps"; 758 case ICE_AQ_LINK_SPEED_50GB: 759 return "50 Gbps"; 760 case ICE_AQ_LINK_SPEED_40GB: 761 return "40 Gbps"; 762 case ICE_AQ_LINK_SPEED_25GB: 763 return "25 Gbps"; 764 case ICE_AQ_LINK_SPEED_20GB: 765 return "20 Gbps"; 766 case ICE_AQ_LINK_SPEED_10GB: 767 return "10 Gbps"; 768 case ICE_AQ_LINK_SPEED_5GB: 769 return "5 Gbps"; 770 case ICE_AQ_LINK_SPEED_2500MB: 771 return "2.5 Gbps"; 772 case ICE_AQ_LINK_SPEED_1000MB: 773 return "1 Gbps"; 774 case ICE_AQ_LINK_SPEED_100MB: 775 return "100 Mbps"; 776 case ICE_AQ_LINK_SPEED_10MB: 777 return "10 Mbps"; 778 case ICE_AQ_LINK_SPEED_UNKNOWN: 779 default: 780 return "Unknown speed"; 781 } 782 } 783 784 /** 785 * ice_get_phy_type_low - Get media associated with phy_type_low 786 * @phy_type_low: the low 64bits of phy_type from the AdminQ 787 * 788 * Given the lower 64bits of the phy_type from the hardware, return the 789 * ifm_active bit associated. Return IFM_UNKNOWN when phy_type_low is unknown. 790 * Note that only one of ice_get_phy_type_low or ice_get_phy_type_high should 791 * be called. If phy_type_low is zero, call ice_phy_type_high. 792 */ 793 int 794 ice_get_phy_type_low(uint64_t phy_type_low) 795 { 796 switch (phy_type_low) { 797 case ICE_PHY_TYPE_LOW_100BASE_TX: 798 return IFM_100_TX; 799 case ICE_PHY_TYPE_LOW_100M_SGMII: 800 return IFM_100_SGMII; 801 case ICE_PHY_TYPE_LOW_1000BASE_T: 802 return IFM_1000_T; 803 case ICE_PHY_TYPE_LOW_1000BASE_SX: 804 return IFM_1000_SX; 805 case ICE_PHY_TYPE_LOW_1000BASE_LX: 806 return IFM_1000_LX; 807 case ICE_PHY_TYPE_LOW_1000BASE_KX: 808 return IFM_1000_KX; 809 case ICE_PHY_TYPE_LOW_1G_SGMII: 810 return IFM_1000_SGMII; 811 case ICE_PHY_TYPE_LOW_2500BASE_T: 812 return IFM_2500_T; 813 case ICE_PHY_TYPE_LOW_2500BASE_X: 814 return IFM_2500_X; 815 case ICE_PHY_TYPE_LOW_2500BASE_KX: 816 return IFM_2500_KX; 817 case ICE_PHY_TYPE_LOW_5GBASE_T: 818 return IFM_5000_T; 819 case ICE_PHY_TYPE_LOW_5GBASE_KR: 820 return IFM_5000_KR; 821 case ICE_PHY_TYPE_LOW_10GBASE_T: 822 return IFM_10G_T; 823 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 824 return IFM_10G_TWINAX; 825 case ICE_PHY_TYPE_LOW_10GBASE_SR: 826 return IFM_10G_SR; 827 case ICE_PHY_TYPE_LOW_10GBASE_LR: 828 return IFM_10G_LR; 829 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 830 return IFM_10G_KR; 831 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 832 return IFM_10G_AOC; 833 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 834 return IFM_10G_SFI; 835 case ICE_PHY_TYPE_LOW_25GBASE_T: 836 return IFM_25G_T; 837 case ICE_PHY_TYPE_LOW_25GBASE_CR: 838 return IFM_25G_CR; 839 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 840 return IFM_25G_CR_S; 841 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 842 return IFM_25G_CR1; 843 case ICE_PHY_TYPE_LOW_25GBASE_SR: 844 return IFM_25G_SR; 845 case ICE_PHY_TYPE_LOW_25GBASE_LR: 846 return IFM_25G_LR; 847 case ICE_PHY_TYPE_LOW_25GBASE_KR: 848 return IFM_25G_KR; 849 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 850 return IFM_25G_KR_S; 851 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 852 return IFM_25G_KR1; 853 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 854 return IFM_25G_AOC; 855 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 856 return IFM_25G_AUI; 857 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 858 return IFM_40G_CR4; 859 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 860 return IFM_40G_SR4; 861 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 862 return IFM_40G_LR4; 863 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 864 return IFM_40G_KR4; 865 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 
866 return IFM_40G_XLAUI_AC; 867 case ICE_PHY_TYPE_LOW_40G_XLAUI: 868 return IFM_40G_XLAUI; 869 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 870 return IFM_50G_CR2; 871 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 872 return IFM_50G_SR2; 873 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 874 return IFM_50G_LR2; 875 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 876 return IFM_50G_KR2; 877 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 878 return IFM_50G_LAUI2_AC; 879 case ICE_PHY_TYPE_LOW_50G_LAUI2: 880 return IFM_50G_LAUI2; 881 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 882 return IFM_50G_AUI2_AC; 883 case ICE_PHY_TYPE_LOW_50G_AUI2: 884 return IFM_50G_AUI2; 885 case ICE_PHY_TYPE_LOW_50GBASE_CP: 886 return IFM_50G_CP; 887 case ICE_PHY_TYPE_LOW_50GBASE_SR: 888 return IFM_50G_SR; 889 case ICE_PHY_TYPE_LOW_50GBASE_FR: 890 return IFM_50G_FR; 891 case ICE_PHY_TYPE_LOW_50GBASE_LR: 892 return IFM_50G_LR; 893 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 894 return IFM_50G_KR_PAM4; 895 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 896 return IFM_50G_AUI1_AC; 897 case ICE_PHY_TYPE_LOW_50G_AUI1: 898 return IFM_50G_AUI1; 899 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 900 return IFM_100G_CR4; 901 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 902 return IFM_100G_SR4; 903 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 904 return IFM_100G_LR4; 905 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 906 return IFM_100G_KR4; 907 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 908 return IFM_100G_CAUI4_AC; 909 case ICE_PHY_TYPE_LOW_100G_CAUI4: 910 return IFM_100G_CAUI4; 911 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 912 return IFM_100G_AUI4_AC; 913 case ICE_PHY_TYPE_LOW_100G_AUI4: 914 return IFM_100G_AUI4; 915 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 916 return IFM_100G_CR_PAM4; 917 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 918 return IFM_100G_KR_PAM4; 919 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 920 return IFM_100G_CP2; 921 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 922 return IFM_100G_SR2; 923 case ICE_PHY_TYPE_LOW_100GBASE_DR: 924 return IFM_100G_DR; 925 default: 926 return IFM_UNKNOWN; 927 } 928 } 929 930 /** 931 * ice_get_phy_type_high - Get media associated with phy_type_high 932 * @phy_type_high: the upper 64bits of phy_type from the AdminQ 933 * 934 * Given the upper 64bits of the phy_type from the hardware, return the 935 * ifm_active bit associated. Return IFM_UNKNOWN on an unknown value. Note 936 * that only one of ice_get_phy_type_low or ice_get_phy_type_high should be 937 * called. If phy_type_high is zero, call ice_get_phy_type_low. 938 */ 939 int 940 ice_get_phy_type_high(uint64_t phy_type_high) 941 { 942 switch (phy_type_high) { 943 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 944 return IFM_100G_KR2_PAM4; 945 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 946 return IFM_100G_CAUI2_AC; 947 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 948 return IFM_100G_CAUI2; 949 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 950 return IFM_100G_AUI2_AC; 951 case ICE_PHY_TYPE_HIGH_100G_AUI2: 952 return IFM_100G_AUI2; 953 default: 954 return IFM_UNKNOWN; 955 } 956 } 957 958 /** 959 * ice_phy_types_to_max_rate - Returns port's max supported baudrate 960 * @pi: port info struct 961 * 962 * ice_aq_get_phy_caps() w/ ICE_AQC_REPORT_TOPO_CAP parameter needs to have 963 * been called before this function for it to work. 
964 */ 965 static uint64_t 966 ice_phy_types_to_max_rate(struct ice_port_info *pi) 967 { 968 uint64_t phy_low = pi->phy.phy_type_low; 969 uint64_t phy_high = pi->phy.phy_type_high; 970 uint64_t max_rate = 0; 971 int bit; 972 973 /* 974 * These are based on the indices used in the BIT() macros for 975 * ICE_PHY_TYPE_LOW_* 976 */ 977 static const uint64_t phy_rates[] = { 978 IF_Mbps(100), 979 IF_Mbps(100), 980 IF_Gbps(1ULL), 981 IF_Gbps(1ULL), 982 IF_Gbps(1ULL), 983 IF_Gbps(1ULL), 984 IF_Gbps(1ULL), 985 IF_Mbps(2500ULL), 986 IF_Mbps(2500ULL), 987 IF_Mbps(2500ULL), 988 IF_Gbps(5ULL), 989 IF_Gbps(5ULL), 990 IF_Gbps(10ULL), 991 IF_Gbps(10ULL), 992 IF_Gbps(10ULL), 993 IF_Gbps(10ULL), 994 IF_Gbps(10ULL), 995 IF_Gbps(10ULL), 996 IF_Gbps(10ULL), 997 IF_Gbps(25ULL), 998 IF_Gbps(25ULL), 999 IF_Gbps(25ULL), 1000 IF_Gbps(25ULL), 1001 IF_Gbps(25ULL), 1002 IF_Gbps(25ULL), 1003 IF_Gbps(25ULL), 1004 IF_Gbps(25ULL), 1005 IF_Gbps(25ULL), 1006 IF_Gbps(25ULL), 1007 IF_Gbps(25ULL), 1008 IF_Gbps(40ULL), 1009 IF_Gbps(40ULL), 1010 IF_Gbps(40ULL), 1011 IF_Gbps(40ULL), 1012 IF_Gbps(40ULL), 1013 IF_Gbps(40ULL), 1014 IF_Gbps(50ULL), 1015 IF_Gbps(50ULL), 1016 IF_Gbps(50ULL), 1017 IF_Gbps(50ULL), 1018 IF_Gbps(50ULL), 1019 IF_Gbps(50ULL), 1020 IF_Gbps(50ULL), 1021 IF_Gbps(50ULL), 1022 IF_Gbps(50ULL), 1023 IF_Gbps(50ULL), 1024 IF_Gbps(50ULL), 1025 IF_Gbps(50ULL), 1026 IF_Gbps(50ULL), 1027 IF_Gbps(50ULL), 1028 IF_Gbps(50ULL), 1029 IF_Gbps(100ULL), 1030 IF_Gbps(100ULL), 1031 IF_Gbps(100ULL), 1032 IF_Gbps(100ULL), 1033 IF_Gbps(100ULL), 1034 IF_Gbps(100ULL), 1035 IF_Gbps(100ULL), 1036 IF_Gbps(100ULL), 1037 IF_Gbps(100ULL), 1038 IF_Gbps(100ULL), 1039 IF_Gbps(100ULL), 1040 IF_Gbps(100ULL), 1041 IF_Gbps(100ULL), 1042 /* These rates are for ICE_PHY_TYPE_HIGH_* */ 1043 IF_Gbps(100ULL), 1044 IF_Gbps(100ULL), 1045 IF_Gbps(100ULL), 1046 IF_Gbps(100ULL), 1047 IF_Gbps(100ULL) 1048 }; 1049 1050 /* coverity[address_of] */ 1051 for_each_set_bit(bit, &phy_high, 64) 1052 if ((bit + 64) < (int)ARRAY_SIZE(phy_rates)) 1053 max_rate = uqmax(max_rate, phy_rates[(bit + 64)]); 1054 1055 /* coverity[address_of] */ 1056 for_each_set_bit(bit, &phy_low, 64) 1057 max_rate = uqmax(max_rate, phy_rates[bit]); 1058 1059 return (max_rate); 1060 } 1061 1062 /* The if_media type is split over the original 5 bit media variant field, 1063 * along with extended types using up extra bits in the options section. 1064 * We want to convert this split number into a bitmap index, so we reverse the 1065 * calculation of IFM_X here. 1066 */ 1067 #define IFM_IDX(x) (((x) & IFM_TMASK) | \ 1068 (((x) & IFM_ETH_XTYPE) >> IFM_ETH_XSHIFT)) 1069 1070 /** 1071 * ice_add_media_types - Add supported media types to the media structure 1072 * @sc: ice private softc structure 1073 * @media: ifmedia structure to setup 1074 * 1075 * Looks up the supported phy types, and initializes the various media types 1076 * available. 1077 * 1078 * @pre this function must be protected from being called while another thread 1079 * is accessing the ifmedia types. 1080 */ 1081 enum ice_status 1082 ice_add_media_types(struct ice_softc *sc, struct ifmedia *media) 1083 { 1084 enum ice_status status; 1085 uint64_t phy_low, phy_high; 1086 int bit; 1087 1088 ASSERT_CFG_LOCKED(sc); 1089 1090 /* the maximum possible media type index is 511. We probably don't 1091 * need most of this space, but this ensures future compatibility when 1092 * additional media types are used. 
1093 */ 1094 ice_declare_bitmap(already_added, 511); 1095 1096 /* Remove all previous media types */ 1097 ifmedia_removeall(media); 1098 1099 status = ice_get_phy_types(sc, &phy_low, &phy_high); 1100 if (status != ICE_SUCCESS) { 1101 /* Function already prints appropriate error 1102 * message 1103 */ 1104 return (status); 1105 } 1106 1107 /* make sure the added bitmap is zero'd */ 1108 memset(already_added, 0, sizeof(already_added)); 1109 1110 /* coverity[address_of] */ 1111 for_each_set_bit(bit, &phy_low, 64) { 1112 uint64_t type = BIT_ULL(bit); 1113 int ostype; 1114 1115 /* get the OS media type */ 1116 ostype = ice_get_phy_type_low(type); 1117 1118 /* don't bother adding the unknown type */ 1119 if (ostype == IFM_UNKNOWN) 1120 continue; 1121 1122 /* only add each media type to the list once */ 1123 if (ice_is_bit_set(already_added, IFM_IDX(ostype))) 1124 continue; 1125 1126 ifmedia_add(media, IFM_ETHER | ostype, 0, NULL); 1127 ice_set_bit(IFM_IDX(ostype), already_added); 1128 } 1129 1130 /* coverity[address_of] */ 1131 for_each_set_bit(bit, &phy_high, 64) { 1132 uint64_t type = BIT_ULL(bit); 1133 int ostype; 1134 1135 /* get the OS media type */ 1136 ostype = ice_get_phy_type_high(type); 1137 1138 /* don't bother adding the unknown type */ 1139 if (ostype == IFM_UNKNOWN) 1140 continue; 1141 1142 /* only add each media type to the list once */ 1143 if (ice_is_bit_set(already_added, IFM_IDX(ostype))) 1144 continue; 1145 1146 ifmedia_add(media, IFM_ETHER | ostype, 0, NULL); 1147 ice_set_bit(IFM_IDX(ostype), already_added); 1148 } 1149 1150 /* Use autoselect media by default */ 1151 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 1152 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 1153 1154 return (ICE_SUCCESS); 1155 } 1156 1157 /** 1158 * ice_configure_rxq_interrupts - Configure HW Rx queues for MSI-X interrupts 1159 * @vsi: the VSI to configure 1160 * 1161 * Called when setting up MSI-X interrupts to configure the Rx hardware queues. 1162 */ 1163 void 1164 ice_configure_rxq_interrupts(struct ice_vsi *vsi) 1165 { 1166 struct ice_hw *hw = &vsi->sc->hw; 1167 int i; 1168 1169 for (i = 0; i < vsi->num_rx_queues; i++) { 1170 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1171 u32 val; 1172 1173 val = (QINT_RQCTL_CAUSE_ENA_M | 1174 (ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) | 1175 (rxq->irqv->me << QINT_RQCTL_MSIX_INDX_S)); 1176 wr32(hw, QINT_RQCTL(vsi->rx_qmap[rxq->me]), val); 1177 } 1178 1179 ice_flush(hw); 1180 } 1181 1182 /** 1183 * ice_configure_txq_interrupts - Configure HW Tx queues for MSI-X interrupts 1184 * @vsi: the VSI to configure 1185 * 1186 * Called when setting up MSI-X interrupts to configure the Tx hardware queues. 1187 */ 1188 void 1189 ice_configure_txq_interrupts(struct ice_vsi *vsi) 1190 { 1191 struct ice_hw *hw = &vsi->sc->hw; 1192 int i; 1193 1194 for (i = 0; i < vsi->num_tx_queues; i++) { 1195 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1196 u32 val; 1197 1198 val = (QINT_TQCTL_CAUSE_ENA_M | 1199 (ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) | 1200 (txq->irqv->me << QINT_TQCTL_MSIX_INDX_S)); 1201 wr32(hw, QINT_TQCTL(vsi->tx_qmap[txq->me]), val); 1202 } 1203 1204 ice_flush(hw); 1205 } 1206 1207 /** 1208 * ice_flush_rxq_interrupts - Unconfigure Hw Rx queues MSI-X interrupt cause 1209 * @vsi: the VSI to configure 1210 * 1211 * Unset the CAUSE_ENA flag of the TQCTL register for each queue, then trigger 1212 * a software interrupt on that cause. This is required as part of the Rx 1213 * queue disable logic to dissociate the Rx queue from the interrupt. 
1214 * 1215 * Note: this function must be called prior to disabling Rx queues with 1216 * ice_control_rx_queues, otherwise the Rx queue may not be disabled properly. 1217 */ 1218 void 1219 ice_flush_rxq_interrupts(struct ice_vsi *vsi) 1220 { 1221 struct ice_hw *hw = &vsi->sc->hw; 1222 int i; 1223 1224 for (i = 0; i < vsi->num_rx_queues; i++) { 1225 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1226 u32 reg, val; 1227 1228 /* Clear the CAUSE_ENA flag */ 1229 reg = vsi->rx_qmap[rxq->me]; 1230 val = rd32(hw, QINT_RQCTL(reg)); 1231 val &= ~QINT_RQCTL_CAUSE_ENA_M; 1232 wr32(hw, QINT_RQCTL(reg), val); 1233 1234 ice_flush(hw); 1235 1236 /* Trigger a software interrupt to complete interrupt 1237 * dissociation. 1238 */ 1239 wr32(hw, GLINT_DYN_CTL(rxq->irqv->me), 1240 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); 1241 } 1242 } 1243 1244 /** 1245 * ice_flush_txq_interrupts - Unconfigure Hw Tx queues MSI-X interrupt cause 1246 * @vsi: the VSI to configure 1247 * 1248 * Unset the CAUSE_ENA flag of the TQCTL register for each queue, then trigger 1249 * a software interrupt on that cause. This is required as part of the Tx 1250 * queue disable logic to dissociate the Tx queue from the interrupt. 1251 * 1252 * Note: this function must be called prior to ice_vsi_disable_tx, otherwise 1253 * the Tx queue disable may not complete properly. 1254 */ 1255 void 1256 ice_flush_txq_interrupts(struct ice_vsi *vsi) 1257 { 1258 struct ice_hw *hw = &vsi->sc->hw; 1259 int i; 1260 1261 for (i = 0; i < vsi->num_tx_queues; i++) { 1262 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1263 u32 reg, val; 1264 1265 /* Clear the CAUSE_ENA flag */ 1266 reg = vsi->tx_qmap[txq->me]; 1267 val = rd32(hw, QINT_TQCTL(reg)); 1268 val &= ~QINT_TQCTL_CAUSE_ENA_M; 1269 wr32(hw, QINT_TQCTL(reg), val); 1270 1271 ice_flush(hw); 1272 1273 /* Trigger a software interrupt to complete interrupt 1274 * dissociation. 1275 */ 1276 wr32(hw, GLINT_DYN_CTL(txq->irqv->me), 1277 GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); 1278 } 1279 } 1280 1281 /** 1282 * ice_configure_rx_itr - Configure the Rx ITR settings for this VSI 1283 * @vsi: the VSI to configure 1284 * 1285 * Program the hardware ITR registers with the settings for this VSI. 1286 */ 1287 void 1288 ice_configure_rx_itr(struct ice_vsi *vsi) 1289 { 1290 struct ice_hw *hw = &vsi->sc->hw; 1291 int i; 1292 1293 /* TODO: Handle per-queue/per-vector ITR? */ 1294 1295 for (i = 0; i < vsi->num_rx_queues; i++) { 1296 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1297 1298 wr32(hw, GLINT_ITR(ICE_RX_ITR, rxq->irqv->me), 1299 ice_itr_to_reg(hw, vsi->rx_itr)); 1300 } 1301 1302 ice_flush(hw); 1303 } 1304 1305 /** 1306 * ice_configure_tx_itr - Configure the Tx ITR settings for this VSI 1307 * @vsi: the VSI to configure 1308 * 1309 * Program the hardware ITR registers with the settings for this VSI. 1310 */ 1311 void 1312 ice_configure_tx_itr(struct ice_vsi *vsi) 1313 { 1314 struct ice_hw *hw = &vsi->sc->hw; 1315 int i; 1316 1317 /* TODO: Handle per-queue/per-vector ITR? 
*/ 1318 1319 for (i = 0; i < vsi->num_tx_queues; i++) { 1320 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1321 1322 wr32(hw, GLINT_ITR(ICE_TX_ITR, txq->irqv->me), 1323 ice_itr_to_reg(hw, vsi->tx_itr)); 1324 } 1325 1326 ice_flush(hw); 1327 } 1328 1329 /** 1330 * ice_setup_tx_ctx - Setup an ice_tlan_ctx structure for a queue 1331 * @txq: the Tx queue to configure 1332 * @tlan_ctx: the Tx LAN queue context structure to initialize 1333 * @pf_q: real queue number 1334 */ 1335 static int 1336 ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) 1337 { 1338 struct ice_vsi *vsi = txq->vsi; 1339 struct ice_softc *sc = vsi->sc; 1340 struct ice_hw *hw = &sc->hw; 1341 1342 tlan_ctx->port_num = hw->port_info->lport; 1343 1344 /* number of descriptors in the queue */ 1345 tlan_ctx->qlen = txq->desc_count; 1346 1347 /* set the transmit queue base address, defined in 128 byte units */ 1348 tlan_ctx->base = txq->tx_paddr >> 7; 1349 1350 tlan_ctx->pf_num = hw->pf_id; 1351 1352 /* For now, we only have code supporting PF VSIs */ 1353 switch (vsi->type) { 1354 case ICE_VSI_PF: 1355 tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 1356 break; 1357 default: 1358 return (ENODEV); 1359 } 1360 1361 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); 1362 1363 /* Enable TSO */ 1364 tlan_ctx->tso_ena = 1; 1365 tlan_ctx->internal_usage_flag = 1; 1366 1367 tlan_ctx->tso_qnum = pf_q; 1368 1369 /* 1370 * Stick with the older legacy Tx queue interface, instead of the new 1371 * advanced queue interface. 1372 */ 1373 tlan_ctx->legacy_int = 1; 1374 1375 /* Descriptor WB mode */ 1376 tlan_ctx->wb_mode = 0; 1377 1378 return (0); 1379 } 1380 1381 /** 1382 * ice_cfg_vsi_for_tx - Configure the hardware for Tx 1383 * @vsi: the VSI to configure 1384 * 1385 * Configure the device Tx queues through firmware AdminQ commands. After 1386 * this, Tx queues will be ready for transmit. 1387 */ 1388 int 1389 ice_cfg_vsi_for_tx(struct ice_vsi *vsi) 1390 { 1391 struct ice_aqc_add_tx_qgrp *qg; 1392 struct ice_hw *hw = &vsi->sc->hw; 1393 device_t dev = vsi->sc->dev; 1394 enum ice_status status; 1395 int i; 1396 int err = 0; 1397 u16 qg_size, pf_q; 1398 1399 qg_size = ice_struct_size(qg, txqs, 1); 1400 qg = (struct ice_aqc_add_tx_qgrp *)malloc(qg_size, M_ICE, M_NOWAIT|M_ZERO); 1401 if (!qg) 1402 return (ENOMEM); 1403 1404 qg->num_txqs = 1; 1405 1406 for (i = 0; i < vsi->num_tx_queues; i++) { 1407 struct ice_tlan_ctx tlan_ctx = { 0 }; 1408 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 1409 1410 pf_q = vsi->tx_qmap[txq->me]; 1411 qg->txqs[0].txq_id = htole16(pf_q); 1412 1413 err = ice_setup_tx_ctx(txq, &tlan_ctx, pf_q); 1414 if (err) 1415 goto free_txqg; 1416 1417 ice_set_ctx(hw, (u8 *)&tlan_ctx, qg->txqs[0].txq_ctx, 1418 ice_tlan_ctx_info); 1419 1420 status = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1421 i, 1, qg, qg_size, NULL); 1422 if (status) { 1423 device_printf(dev, 1424 "Failed to set LAN Tx queue context, err %s aq_err %s\n", 1425 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 1426 err = ENODEV; 1427 goto free_txqg; 1428 } 1429 1430 /* Keep track of the Tx queue TEID */ 1431 if (pf_q == le16toh(qg->txqs[0].txq_id)) 1432 txq->q_teid = le32toh(qg->txqs[0].q_teid); 1433 } 1434 1435 free_txqg: 1436 free(qg, M_ICE); 1437 1438 return (err); 1439 } 1440 1441 /** 1442 * ice_setup_rx_ctx - Setup an Rx context structure for a receive queue 1443 * @rxq: the receive queue to program 1444 * 1445 * Setup an Rx queue context structure and program it into the hardware 1446 * registers. 
This is a necessary step for enabling the Rx queue. 1447 * 1448 * @pre the VSI associated with this queue must have initialized mbuf_sz 1449 */ 1450 static int 1451 ice_setup_rx_ctx(struct ice_rx_queue *rxq) 1452 { 1453 struct ice_rlan_ctx rlan_ctx = {0}; 1454 struct ice_vsi *vsi = rxq->vsi; 1455 struct ice_softc *sc = vsi->sc; 1456 struct ice_hw *hw = &sc->hw; 1457 enum ice_status status; 1458 u32 rxdid = ICE_RXDID_FLEX_NIC; 1459 u32 regval; 1460 u16 pf_q; 1461 1462 pf_q = vsi->rx_qmap[rxq->me]; 1463 1464 /* set the receive queue base address, defined in 128 byte units */ 1465 rlan_ctx.base = rxq->rx_paddr >> 7; 1466 1467 rlan_ctx.qlen = rxq->desc_count; 1468 1469 rlan_ctx.dbuf = vsi->mbuf_sz >> ICE_RLAN_CTX_DBUF_S; 1470 1471 /* use 32 byte descriptors */ 1472 rlan_ctx.dsize = 1; 1473 1474 /* Strip the Ethernet CRC bytes before the packet is posted to the 1475 * host memory. 1476 */ 1477 rlan_ctx.crcstrip = 1; 1478 1479 rlan_ctx.l2tsel = 1; 1480 1481 /* don't do header splitting */ 1482 rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; 1483 rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; 1484 rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; 1485 1486 /* strip VLAN from inner headers */ 1487 rlan_ctx.showiv = 1; 1488 1489 rlan_ctx.rxmax = min(vsi->max_frame_size, 1490 ICE_MAX_RX_SEGS * vsi->mbuf_sz); 1491 1492 rlan_ctx.lrxqthresh = 1; 1493 1494 if (vsi->type != ICE_VSI_VF) { 1495 regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); 1496 regval &= ~QRXFLXP_CNTXT_RXDID_IDX_M; 1497 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 1498 QRXFLXP_CNTXT_RXDID_IDX_M; 1499 1500 regval &= ~QRXFLXP_CNTXT_RXDID_PRIO_M; 1501 regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 1502 QRXFLXP_CNTXT_RXDID_PRIO_M; 1503 1504 wr32(hw, QRXFLXP_CNTXT(pf_q), regval); 1505 } 1506 1507 status = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); 1508 if (status) { 1509 device_printf(sc->dev, 1510 "Failed to set LAN Rx queue context, err %s aq_err %s\n", 1511 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 1512 return (EIO); 1513 } 1514 1515 wr32(hw, rxq->tail, 0); 1516 1517 return 0; 1518 } 1519 1520 /** 1521 * ice_cfg_vsi_for_rx - Configure the hardware for Rx 1522 * @vsi: the VSI to configure 1523 * 1524 * Prepare an Rx context descriptor and configure the device to receive 1525 * traffic. 1526 * 1527 * @pre the VSI must have initialized mbuf_sz 1528 */ 1529 int 1530 ice_cfg_vsi_for_rx(struct ice_vsi *vsi) 1531 { 1532 int i, err; 1533 1534 for (i = 0; i < vsi->num_rx_queues; i++) { 1535 MPASS(vsi->mbuf_sz > 0); 1536 err = ice_setup_rx_ctx(&vsi->rx_queues[i]); 1537 if (err) 1538 return err; 1539 } 1540 1541 return (0); 1542 } 1543 1544 /** 1545 * ice_is_rxq_ready - Check if an Rx queue is ready 1546 * @hw: ice hw structure 1547 * @pf_q: absolute PF queue index to check 1548 * @reg: on successful return, contains qrx_ctrl contents 1549 * 1550 * Reads the QRX_CTRL register and verifies if the queue is in a consistent 1551 * state. That is, QENA_REQ matches QENA_STAT. Used to check before making 1552 * a request to change the queue, as well as to verify the request has 1553 * finished. The queue should change status within a few microseconds, so we 1554 * use a small delay while polling the register. 1555 * 1556 * Returns an error code if the queue does not update after a few retries. 
1557 */ 1558 static int 1559 ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg) 1560 { 1561 u32 qrx_ctrl, qena_req, qena_stat; 1562 int i; 1563 1564 for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { 1565 qrx_ctrl = rd32(hw, QRX_CTRL(pf_q)); 1566 qena_req = (qrx_ctrl >> QRX_CTRL_QENA_REQ_S) & 1; 1567 qena_stat = (qrx_ctrl >> QRX_CTRL_QENA_STAT_S) & 1; 1568 1569 /* if the request and status bits equal, then the queue is 1570 * fully disabled or enabled. 1571 */ 1572 if (qena_req == qena_stat) { 1573 *reg = qrx_ctrl; 1574 return (0); 1575 } 1576 1577 /* wait a few microseconds before we check again */ 1578 DELAY(10); 1579 } 1580 1581 return (ETIMEDOUT); 1582 } 1583 1584 /** 1585 * ice_control_rx_queues - Configure hardware to start or stop the Rx queues 1586 * @vsi: VSI to enable/disable queues 1587 * @enable: true to enable queues, false to disable 1588 * 1589 * Control the Rx queues through the QRX_CTRL register, enabling or disabling 1590 * them. Wait for the appropriate time to ensure that the queues have actually 1591 * reached the expected state. 1592 */ 1593 int 1594 ice_control_rx_queues(struct ice_vsi *vsi, bool enable) 1595 { 1596 struct ice_hw *hw = &vsi->sc->hw; 1597 device_t dev = vsi->sc->dev; 1598 u32 qrx_ctrl = 0; 1599 int i, err; 1600 1601 /* TODO: amortize waits by changing all queues up front and then 1602 * checking their status afterwards. This will become more necessary 1603 * when we have a large number of queues. 1604 */ 1605 for (i = 0; i < vsi->num_rx_queues; i++) { 1606 struct ice_rx_queue *rxq = &vsi->rx_queues[i]; 1607 int pf_q = vsi->rx_qmap[rxq->me]; 1608 1609 err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); 1610 if (err) { 1611 device_printf(dev, 1612 "Rx queue %d is not ready\n", 1613 pf_q); 1614 return err; 1615 } 1616 1617 /* Skip if the queue is already in correct state */ 1618 if (enable == !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) 1619 continue; 1620 1621 if (enable) 1622 qrx_ctrl |= QRX_CTRL_QENA_REQ_M; 1623 else 1624 qrx_ctrl &= ~QRX_CTRL_QENA_REQ_M; 1625 wr32(hw, QRX_CTRL(pf_q), qrx_ctrl); 1626 1627 /* wait for the queue to finalize the request */ 1628 err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); 1629 if (err) { 1630 device_printf(dev, 1631 "Rx queue %d %sable timeout\n", 1632 pf_q, (enable ? "en" : "dis")); 1633 return err; 1634 } 1635 1636 /* this should never happen */ 1637 if (enable != !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) { 1638 device_printf(dev, 1639 "Rx queue %d invalid state\n", 1640 pf_q); 1641 return (EDOOFUS); 1642 } 1643 } 1644 1645 return (0); 1646 } 1647 1648 /** 1649 * ice_add_mac_to_list - Add MAC filter to a MAC filter list 1650 * @vsi: the VSI to forward to 1651 * @list: list which contains MAC filter entries 1652 * @addr: the MAC address to be added 1653 * @action: filter action to perform on match 1654 * 1655 * Adds a MAC address filter to the list which will be forwarded to firmware 1656 * to add a series of MAC address filters. 1657 * 1658 * Returns 0 on success, and an error code on failure. 
1659 * 1660 */ 1661 static int 1662 ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 1663 const u8 *addr, enum ice_sw_fwd_act_type action) 1664 { 1665 struct ice_fltr_list_entry *entry; 1666 1667 entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); 1668 if (!entry) 1669 return (ENOMEM); 1670 1671 entry->fltr_info.flag = ICE_FLTR_TX; 1672 entry->fltr_info.src_id = ICE_SRC_ID_VSI; 1673 entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 1674 entry->fltr_info.fltr_act = action; 1675 entry->fltr_info.vsi_handle = vsi->idx; 1676 bcopy(addr, entry->fltr_info.l_data.mac.mac_addr, ETHER_ADDR_LEN); 1677 1678 LIST_ADD(&entry->list_entry, list); 1679 1680 return 0; 1681 } 1682 1683 /** 1684 * ice_free_fltr_list - Free memory associated with a MAC address list 1685 * @list: the list to free 1686 * 1687 * Free the memory of each entry associated with the list. 1688 */ 1689 static void 1690 ice_free_fltr_list(struct ice_list_head *list) 1691 { 1692 struct ice_fltr_list_entry *e, *tmp; 1693 1694 LIST_FOR_EACH_ENTRY_SAFE(e, tmp, list, ice_fltr_list_entry, list_entry) { 1695 LIST_DEL(&e->list_entry); 1696 free(e, M_ICE); 1697 } 1698 } 1699 1700 /** 1701 * ice_add_vsi_mac_filter - Add a MAC address filter for a VSI 1702 * @vsi: the VSI to add the filter for 1703 * @addr: MAC address to add a filter for 1704 * 1705 * Add a MAC address filter for a given VSI. This is a wrapper around 1706 * ice_add_mac to simplify the interface. First, it only accepts a single 1707 * address, so we don't have to mess around with the list setup in other 1708 * functions. Second, it ignores the ICE_ERR_ALREADY_EXIST error, so that 1709 * callers don't need to worry about attempting to add the same filter twice. 1710 */ 1711 int 1712 ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) 1713 { 1714 struct ice_list_head mac_addr_list; 1715 struct ice_hw *hw = &vsi->sc->hw; 1716 device_t dev = vsi->sc->dev; 1717 enum ice_status status; 1718 int err = 0; 1719 1720 INIT_LIST_HEAD(&mac_addr_list); 1721 1722 err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); 1723 if (err) 1724 goto free_mac_list; 1725 1726 status = ice_add_mac(hw, &mac_addr_list); 1727 if (status == ICE_ERR_ALREADY_EXISTS) { 1728 ; /* Don't complain if we try to add a filter that already exists */ 1729 } else if (status) { 1730 device_printf(dev, 1731 "Failed to add a filter for MAC %6D, err %s aq_err %s\n", 1732 addr, ":", 1733 ice_status_str(status), 1734 ice_aq_str(hw->adminq.sq_last_status)); 1735 err = (EIO); 1736 } 1737 1738 free_mac_list: 1739 ice_free_fltr_list(&mac_addr_list); 1740 return err; 1741 } 1742 1743 /** 1744 * ice_cfg_pf_default_mac_filters - Setup default unicast and broadcast addrs 1745 * @sc: device softc structure 1746 * 1747 * Program the default unicast and broadcast filters for the PF VSI. 1748 */ 1749 int 1750 ice_cfg_pf_default_mac_filters(struct ice_softc *sc) 1751 { 1752 struct ice_vsi *vsi = &sc->pf_vsi; 1753 struct ice_hw *hw = &sc->hw; 1754 int err; 1755 1756 /* Add the LAN MAC address */ 1757 err = ice_add_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); 1758 if (err) 1759 return err; 1760 1761 /* Add the broadcast address */ 1762 err = ice_add_vsi_mac_filter(vsi, broadcastaddr); 1763 if (err) 1764 return err; 1765 1766 return (0); 1767 } 1768 1769 /** 1770 * ice_remove_vsi_mac_filter - Remove a MAC address filter for a VSI 1771 * @vsi: the VSI to add the filter for 1772 * @addr: MAC address to remove a filter for 1773 * 1774 * Remove a MAC address filter from a given VSI. 
This is a wrapper around 1775 * ice_remove_mac to simplify the interface. First, it only accepts a single 1776 * address, so we don't have to mess around with the list setup in other 1777 * functions. Second, it ignores the ICE_ERR_DOES_NOT_EXIST error, so that 1778 * callers don't need to worry about attempting to remove filters which 1779 * haven't yet been added. 1780 */ 1781 int 1782 ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) 1783 { 1784 struct ice_list_head mac_addr_list; 1785 struct ice_hw *hw = &vsi->sc->hw; 1786 device_t dev = vsi->sc->dev; 1787 enum ice_status status; 1788 int err = 0; 1789 1790 INIT_LIST_HEAD(&mac_addr_list); 1791 1792 err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); 1793 if (err) 1794 goto free_mac_list; 1795 1796 status = ice_remove_mac(hw, &mac_addr_list); 1797 if (status == ICE_ERR_DOES_NOT_EXIST) { 1798 ; /* Don't complain if we try to remove a filter that doesn't exist */ 1799 } else if (status) { 1800 device_printf(dev, 1801 "Failed to remove a filter for MAC %6D, err %s aq_err %s\n", 1802 addr, ":", 1803 ice_status_str(status), 1804 ice_aq_str(hw->adminq.sq_last_status)); 1805 err = (EIO); 1806 } 1807 1808 free_mac_list: 1809 ice_free_fltr_list(&mac_addr_list); 1810 return err; 1811 } 1812 1813 /** 1814 * ice_rm_pf_default_mac_filters - Remove default unicast and broadcast addrs 1815 * @sc: device softc structure 1816 * 1817 * Remove the default unicast and broadcast filters from the PF VSI. 1818 */ 1819 int 1820 ice_rm_pf_default_mac_filters(struct ice_softc *sc) 1821 { 1822 struct ice_vsi *vsi = &sc->pf_vsi; 1823 struct ice_hw *hw = &sc->hw; 1824 int err; 1825 1826 /* Remove the LAN MAC address */ 1827 err = ice_remove_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); 1828 if (err) 1829 return err; 1830 1831 /* Remove the broadcast address */ 1832 err = ice_remove_vsi_mac_filter(vsi, broadcastaddr); 1833 if (err) 1834 return (EIO); 1835 1836 return (0); 1837 } 1838 1839 /** 1840 * ice_check_ctrlq_errors - Check for and report controlq errors 1841 * @sc: device private structure 1842 * @qname: name of the controlq 1843 * @cq: the controlq to check 1844 * 1845 * Check and report controlq errors. Currently all we do is report them to the 1846 * kernel message log, but we might want to improve this in the future, such 1847 * as to keep track of statistics. 1848 */ 1849 static void 1850 ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, 1851 struct ice_ctl_q_info *cq) 1852 { 1853 struct ice_hw *hw = &sc->hw; 1854 u32 val; 1855 1856 /* Check for error indications. Note that all the controlqs use the 1857 * same register layout, so we use the PF_FW_AxQLEN defines only. 
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			device_printf(sc->dev,
			    "%s Receive Queue VF Error detected\n", qname);
		if (val & PF_FW_ARQLEN_ARQOVFL_M)
			device_printf(sc->dev,
			    "%s Receive Queue Overflow Error detected\n",
			    qname);
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			device_printf(sc->dev,
			    "%s Receive Queue Critical Error detected\n",
			    qname);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			device_printf(sc->dev,
			    "%s Send Queue VF Error detected\n", qname);
		if (val & PF_FW_ATQLEN_ATQOVFL_M)
			device_printf(sc->dev,
			    "%s Send Queue Overflow Error detected\n",
			    qname);
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			device_printf(sc->dev,
			    "%s Send Queue Critical Error detected\n",
			    qname);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		wr32(hw, cq->sq.len, val);
	}
}

/**
 * ice_process_link_event - Process a link event indication from firmware
 * @sc: device softc structure
 * @e: the received event data
 *
 * Gets the current link status from hardware, and may print a message if an
 * unqualified module is detected.
 */
static void
ice_process_link_event(struct ice_softc *sc,
		       struct ice_rq_event_info __invariant_only *e)
{
	struct ice_port_info *pi = sc->hw.port_info;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum ice_status status;

	/* Sanity check that the data length matches */
	MPASS(le16toh(e->desc.datalen) == sizeof(struct ice_aqc_get_link_status_data));

	/*
	 * Even though the adapter gets link status information inside the
	 * event, it needs to send a Get Link Status AQ command in order
	 * to re-enable link events.
	 */
	pi->phy.get_link_info = true;
	ice_get_link_status(pi, &sc->link_up);

	if (pi->phy.link_info.topo_media_conflict &
	    (ICE_AQ_LINK_TOPO_CONFLICT | ICE_AQ_LINK_MEDIA_CONFLICT |
	     ICE_AQ_LINK_TOPO_CORRUPT))
		device_printf(dev,
		    "Possible mis-configuration of the Ethernet port detected; please use the Intel (R) Ethernet Port Configuration Tool utility to address the issue.\n");

	if ((pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
	    !(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE))
		device_printf(dev,
		    "Link is disabled on this device because an unsupported module type was detected!
Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); 1937 1938 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { 1939 if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) { 1940 status = ice_aq_set_link_restart_an(pi, false, NULL); 1941 if (status != ICE_SUCCESS) 1942 device_printf(dev, 1943 "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", 1944 __func__, ice_status_str(status), 1945 ice_aq_str(hw->adminq.sq_last_status)); 1946 } 1947 } 1948 /* ICE_STATE_NO_MEDIA is cleared when polling task detects media */ 1949 1950 /* Indicate that link status must be reported again */ 1951 ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED); 1952 1953 /* OS link info is updated elsewhere */ 1954 } 1955 1956 /** 1957 * ice_process_ctrlq_event - Respond to a controlq event 1958 * @sc: device private structure 1959 * @qname: the name for this controlq 1960 * @event: the event to process 1961 * 1962 * Perform actions in response to various controlq event notifications. 1963 */ 1964 static void 1965 ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, 1966 struct ice_rq_event_info *event) 1967 { 1968 u16 opcode; 1969 1970 opcode = le16toh(event->desc.opcode); 1971 1972 switch (opcode) { 1973 case ice_aqc_opc_get_link_status: 1974 ice_process_link_event(sc, event); 1975 break; 1976 case ice_mbx_opc_send_msg_to_pf: 1977 /* TODO: handle IOV event */ 1978 break; 1979 case ice_aqc_opc_lldp_set_mib_change: 1980 ice_handle_mib_change_event(sc, event); 1981 break; 1982 case ice_aqc_opc_event_lan_overflow: 1983 ice_handle_lan_overflow_event(sc, event); 1984 break; 1985 default: 1986 device_printf(sc->dev, 1987 "%s Receive Queue unhandled event 0x%04x ignored\n", 1988 qname, opcode); 1989 } 1990 } 1991 1992 /** 1993 * ice_process_ctrlq - helper function to process controlq rings 1994 * @sc: device private structure 1995 * @q_type: specific control queue type 1996 * @pending: return parameter to track remaining events 1997 * 1998 * Process controlq events for a given control queue type. Returns zero on 1999 * success, and an error code on failure. If successful, pending is the number 2000 * of remaining events left in the queue. 2001 */ 2002 int 2003 ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending) 2004 { 2005 struct ice_rq_event_info event = { { 0 } }; 2006 struct ice_hw *hw = &sc->hw; 2007 struct ice_ctl_q_info *cq; 2008 enum ice_status status; 2009 const char *qname; 2010 int loop = 0; 2011 2012 switch (q_type) { 2013 case ICE_CTL_Q_ADMIN: 2014 cq = &hw->adminq; 2015 qname = "Admin"; 2016 break; 2017 case ICE_CTL_Q_MAILBOX: 2018 cq = &hw->mailboxq; 2019 qname = "Mailbox"; 2020 break; 2021 default: 2022 device_printf(sc->dev, 2023 "Unknown control queue type 0x%x\n", 2024 q_type); 2025 return 0; 2026 } 2027 2028 ice_check_ctrlq_errors(sc, qname, cq); 2029 2030 /* 2031 * Control queue processing happens during the admin task which may be 2032 * holding a non-sleepable lock, so we *must* use M_NOWAIT here. 
	 */
	event.buf_len = cq->rq_buf_size;
	event.msg_buf = (u8 *)malloc(event.buf_len, M_ICE, M_ZERO | M_NOWAIT);
	if (!event.msg_buf) {
		device_printf(sc->dev,
		    "Unable to allocate memory for %s Receive Queue event\n",
		    qname);
		return (ENOMEM);
	}

	do {
		status = ice_clean_rq_elem(hw, cq, &event, pending);
		if (status == ICE_ERR_AQ_NO_WORK)
			break;
		if (status) {
			if (q_type == ICE_CTL_Q_ADMIN)
				device_printf(sc->dev,
				    "%s Receive Queue event error %s aq_err %s\n",
				    qname, ice_status_str(status),
				    ice_aq_str(cq->rq_last_status));
			else
				device_printf(sc->dev,
				    "%s Receive Queue event error %s cq_err %d\n",
				    qname, ice_status_str(status), cq->rq_last_status);
			free(event.msg_buf, M_ICE);
			return (EIO);
		}
		/* XXX should we separate this handler by controlq type? */
		ice_process_ctrlq_event(sc, qname, &event);
	} while (*pending && (++loop < ICE_CTRLQ_WORK_LIMIT));

	free(event.msg_buf, M_ICE);

	return 0;
}

/**
 * pkg_ver_empty - Check if a package version is empty
 * @pkg_ver: the package version to check
 * @pkg_name: the package name to check
 *
 * Checks if the package version structure is empty. We consider a package
 * version as empty if none of the versions are non-zero and the name string
 * is null as well.
 *
 * This is used to check if the package version was initialized by the driver,
 * as we do not expect an actual DDP package file to have a zero'd version and
 * name.
 *
 * @returns true if the package name and version are empty (i.e. not yet
 * initialized from a real DDP package), or false otherwise.
 */
static bool
pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name)
{
	return (pkg_name[0] == '\0' &&
		pkg_ver->major == 0 &&
		pkg_ver->minor == 0 &&
		pkg_ver->update == 0 &&
		pkg_ver->draft == 0);
}

/**
 * pkg_ver_compatible - Check if the package version is compatible
 * @pkg_ver: the package version to check
 *
 * Compares the package version number to the driver's expected major/minor
 * version. Returns an integer indicating whether the version is older, newer,
 * or compatible with the driver.
 *
 * @returns 0 if the package version is compatible, -1 if the package version
 * is older, and 1 if the package version is newer than the driver version.
 */
static int
pkg_ver_compatible(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ)
		return (1); /* newer */
	else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
		 (pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
		return (1); /* newer */
	else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
		 (pkg_ver->minor == ICE_PKG_SUPP_VER_MNR))
		return (0); /* compatible */
	else
		return (-1); /* older */
}

/**
 * ice_os_pkg_version_str - Format OS package version info into a sbuf
 * @hw: device hw structure
 * @buf: string buffer to store name/version string
 *
 * Formats the name and version of the OS DDP package as found in the ice_ddp
 * module into a string.
 *
 * @remark This will almost always be the same as the active package, but
 * could be different in some cases. Use ice_active_pkg_version_str to get the
 * version of the active DDP package.
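 *
 * @remark The formatted output follows the pattern
 * "<name> version <major>.<minor>.<update>.<draft>". A string such as
 * "ICE OS Default Package version 1.3.16.0" is an illustrative sketch only;
 * the package name and numbers shown are assumptions, not values taken from
 * this driver.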
2131 */ 2132 static void 2133 ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) 2134 { 2135 char name_buf[ICE_PKG_NAME_SIZE]; 2136 2137 /* If the OS DDP package info is empty, use "None" */ 2138 if (pkg_ver_empty(&hw->pkg_ver, hw->pkg_name)) { 2139 sbuf_printf(buf, "None"); 2140 return; 2141 } 2142 2143 /* 2144 * This should already be null-terminated, but since this is a raw 2145 * value from an external source, strlcpy() into a new buffer to 2146 * make sure. 2147 */ 2148 bzero(name_buf, sizeof(name_buf)); 2149 strlcpy(name_buf, (char *)hw->pkg_name, ICE_PKG_NAME_SIZE); 2150 2151 sbuf_printf(buf, "%s version %u.%u.%u.%u", 2152 name_buf, 2153 hw->pkg_ver.major, 2154 hw->pkg_ver.minor, 2155 hw->pkg_ver.update, 2156 hw->pkg_ver.draft); 2157 } 2158 2159 /** 2160 * ice_active_pkg_version_str - Format active package version info into a sbuf 2161 * @hw: device hw structure 2162 * @buf: string buffer to store name/version string 2163 * 2164 * Formats the name and version of the active DDP package info into a string 2165 * buffer for use. 2166 */ 2167 static void 2168 ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) 2169 { 2170 char name_buf[ICE_PKG_NAME_SIZE]; 2171 2172 /* If the active DDP package info is empty, use "None" */ 2173 if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { 2174 sbuf_printf(buf, "None"); 2175 return; 2176 } 2177 2178 /* 2179 * This should already be null-terminated, but since this is a raw 2180 * value from an external source, strlcpy() into a new buffer to 2181 * make sure. 2182 */ 2183 bzero(name_buf, sizeof(name_buf)); 2184 strlcpy(name_buf, (char *)hw->active_pkg_name, ICE_PKG_NAME_SIZE); 2185 2186 sbuf_printf(buf, "%s version %u.%u.%u.%u", 2187 name_buf, 2188 hw->active_pkg_ver.major, 2189 hw->active_pkg_ver.minor, 2190 hw->active_pkg_ver.update, 2191 hw->active_pkg_ver.draft); 2192 2193 if (hw->active_track_id != 0) 2194 sbuf_printf(buf, ", track id 0x%08x", hw->active_track_id); 2195 } 2196 2197 /** 2198 * ice_nvm_version_str - Format the NVM version information into a sbuf 2199 * @hw: device hw structure 2200 * @buf: string buffer to store version string 2201 * 2202 * Formats the NVM information including firmware version, API version, NVM 2203 * version, the EETRACK id, and OEM specific version information into a string 2204 * buffer. 2205 */ 2206 static void 2207 ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf) 2208 { 2209 struct ice_nvm_info *nvm = &hw->nvm; 2210 struct ice_orom_info *orom = &nvm->orom; 2211 struct ice_netlist_ver_info *netlist_ver = &hw->netlist_ver; 2212 2213 /* Note that the netlist versions are stored in packed Binary Coded 2214 * Decimal format. The use of '%x' will correctly display these as 2215 * decimal numbers. This works because every 4 bits will be displayed 2216 * as a hexadecimal digit, and the BCD format will only use the values 2217 * 0-9. 
2218 */ 2219 sbuf_printf(buf, 2220 "fw %u.%u.%u api %u.%u nvm %x.%02x etid %08x netlist %x.%x.%x-%x.%x.%x.%04x oem %u.%u.%u", 2221 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, 2222 hw->api_maj_ver, hw->api_min_ver, 2223 nvm->major_ver, nvm->minor_ver, nvm->eetrack, 2224 netlist_ver->major, netlist_ver->minor, 2225 netlist_ver->type >> 16, netlist_ver->type & 0xFFFF, 2226 netlist_ver->rev, netlist_ver->cust_ver, netlist_ver->hash, 2227 orom->major, orom->build, orom->patch); 2228 } 2229 2230 /** 2231 * ice_print_nvm_version - Print the NVM info to the kernel message log 2232 * @sc: the device softc structure 2233 * 2234 * Format and print an NVM version string using ice_nvm_version_str(). 2235 */ 2236 void 2237 ice_print_nvm_version(struct ice_softc *sc) 2238 { 2239 struct ice_hw *hw = &sc->hw; 2240 device_t dev = sc->dev; 2241 struct sbuf *sbuf; 2242 2243 sbuf = sbuf_new_auto(); 2244 ice_nvm_version_str(hw, sbuf); 2245 sbuf_finish(sbuf); 2246 device_printf(dev, "%s\n", sbuf_data(sbuf)); 2247 sbuf_delete(sbuf); 2248 } 2249 2250 /** 2251 * ice_update_vsi_hw_stats - Update VSI-specific ethernet statistics counters 2252 * @vsi: the VSI to be updated 2253 * 2254 * Reads hardware stats and updates the ice_vsi_hw_stats tracking structure with 2255 * the updated values. 2256 */ 2257 void 2258 ice_update_vsi_hw_stats(struct ice_vsi *vsi) 2259 { 2260 struct ice_eth_stats *prev_es, *cur_es; 2261 struct ice_hw *hw = &vsi->sc->hw; 2262 u16 vsi_num; 2263 2264 if (!ice_is_vsi_valid(hw, vsi->idx)) 2265 return; 2266 2267 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); /* HW absolute index of a VSI */ 2268 prev_es = &vsi->hw_stats.prev; 2269 cur_es = &vsi->hw_stats.cur; 2270 2271 #define ICE_VSI_STAT40(name, location) \ 2272 ice_stat_update40(hw, name ## L(vsi_num), \ 2273 vsi->hw_stats.offsets_loaded, \ 2274 &prev_es->location, &cur_es->location) 2275 2276 #define ICE_VSI_STAT32(name, location) \ 2277 ice_stat_update32(hw, name(vsi_num), \ 2278 vsi->hw_stats.offsets_loaded, \ 2279 &prev_es->location, &cur_es->location) 2280 2281 ICE_VSI_STAT40(GLV_GORC, rx_bytes); 2282 ICE_VSI_STAT40(GLV_UPRC, rx_unicast); 2283 ICE_VSI_STAT40(GLV_MPRC, rx_multicast); 2284 ICE_VSI_STAT40(GLV_BPRC, rx_broadcast); 2285 ICE_VSI_STAT32(GLV_RDPC, rx_discards); 2286 ICE_VSI_STAT40(GLV_GOTC, tx_bytes); 2287 ICE_VSI_STAT40(GLV_UPTC, tx_unicast); 2288 ICE_VSI_STAT40(GLV_MPTC, tx_multicast); 2289 ICE_VSI_STAT40(GLV_BPTC, tx_broadcast); 2290 ICE_VSI_STAT32(GLV_TEPC, tx_errors); 2291 2292 ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded, 2293 cur_es); 2294 2295 #undef ICE_VSI_STAT40 2296 #undef ICE_VSI_STAT32 2297 2298 vsi->hw_stats.offsets_loaded = true; 2299 } 2300 2301 /** 2302 * ice_reset_vsi_stats - Reset VSI statistics counters 2303 * @vsi: VSI structure 2304 * 2305 * Resets the software tracking counters for the VSI statistics, and indicate 2306 * that the offsets haven't been loaded. This is intended to be called 2307 * post-reset so that VSI statistics count from zero again. 2308 */ 2309 void 2310 ice_reset_vsi_stats(struct ice_vsi *vsi) 2311 { 2312 /* Reset HW stats */ 2313 memset(&vsi->hw_stats.prev, 0, sizeof(vsi->hw_stats.prev)); 2314 memset(&vsi->hw_stats.cur, 0, sizeof(vsi->hw_stats.cur)); 2315 vsi->hw_stats.offsets_loaded = false; 2316 } 2317 2318 /** 2319 * ice_update_pf_stats - Update port stats counters 2320 * @sc: device private softc structure 2321 * 2322 * Reads hardware statistics registers and updates the software tracking 2323 * structure with new values. 
2324 */ 2325 void 2326 ice_update_pf_stats(struct ice_softc *sc) 2327 { 2328 struct ice_hw_port_stats *prev_ps, *cur_ps; 2329 struct ice_hw *hw = &sc->hw; 2330 u8 lport; 2331 2332 MPASS(hw->port_info); 2333 2334 prev_ps = &sc->stats.prev; 2335 cur_ps = &sc->stats.cur; 2336 lport = hw->port_info->lport; 2337 2338 #define ICE_PF_STAT40(name, location) \ 2339 ice_stat_update40(hw, name ## L(lport), \ 2340 sc->stats.offsets_loaded, \ 2341 &prev_ps->location, &cur_ps->location) 2342 2343 #define ICE_PF_STAT32(name, location) \ 2344 ice_stat_update32(hw, name(lport), \ 2345 sc->stats.offsets_loaded, \ 2346 &prev_ps->location, &cur_ps->location) 2347 2348 ICE_PF_STAT40(GLPRT_GORC, eth.rx_bytes); 2349 ICE_PF_STAT40(GLPRT_UPRC, eth.rx_unicast); 2350 ICE_PF_STAT40(GLPRT_MPRC, eth.rx_multicast); 2351 ICE_PF_STAT40(GLPRT_BPRC, eth.rx_broadcast); 2352 ICE_PF_STAT40(GLPRT_GOTC, eth.tx_bytes); 2353 ICE_PF_STAT40(GLPRT_UPTC, eth.tx_unicast); 2354 ICE_PF_STAT40(GLPRT_MPTC, eth.tx_multicast); 2355 ICE_PF_STAT40(GLPRT_BPTC, eth.tx_broadcast); 2356 /* This stat register doesn't have an lport */ 2357 ice_stat_update32(hw, PRTRPB_RDPC, 2358 sc->stats.offsets_loaded, 2359 &prev_ps->eth.rx_discards, &cur_ps->eth.rx_discards); 2360 2361 ICE_PF_STAT32(GLPRT_TDOLD, tx_dropped_link_down); 2362 ICE_PF_STAT40(GLPRT_PRC64, rx_size_64); 2363 ICE_PF_STAT40(GLPRT_PRC127, rx_size_127); 2364 ICE_PF_STAT40(GLPRT_PRC255, rx_size_255); 2365 ICE_PF_STAT40(GLPRT_PRC511, rx_size_511); 2366 ICE_PF_STAT40(GLPRT_PRC1023, rx_size_1023); 2367 ICE_PF_STAT40(GLPRT_PRC1522, rx_size_1522); 2368 ICE_PF_STAT40(GLPRT_PRC9522, rx_size_big); 2369 ICE_PF_STAT40(GLPRT_PTC64, tx_size_64); 2370 ICE_PF_STAT40(GLPRT_PTC127, tx_size_127); 2371 ICE_PF_STAT40(GLPRT_PTC255, tx_size_255); 2372 ICE_PF_STAT40(GLPRT_PTC511, tx_size_511); 2373 ICE_PF_STAT40(GLPRT_PTC1023, tx_size_1023); 2374 ICE_PF_STAT40(GLPRT_PTC1522, tx_size_1522); 2375 ICE_PF_STAT40(GLPRT_PTC9522, tx_size_big); 2376 2377 ICE_PF_STAT32(GLPRT_LXONRXC, link_xon_rx); 2378 ICE_PF_STAT32(GLPRT_LXOFFRXC, link_xoff_rx); 2379 ICE_PF_STAT32(GLPRT_LXONTXC, link_xon_tx); 2380 ICE_PF_STAT32(GLPRT_LXOFFTXC, link_xoff_tx); 2381 ICE_PF_STAT32(GLPRT_CRCERRS, crc_errors); 2382 ICE_PF_STAT32(GLPRT_ILLERRC, illegal_bytes); 2383 ICE_PF_STAT32(GLPRT_MLFC, mac_local_faults); 2384 ICE_PF_STAT32(GLPRT_MRFC, mac_remote_faults); 2385 ICE_PF_STAT32(GLPRT_RLEC, rx_len_errors); 2386 ICE_PF_STAT32(GLPRT_RUC, rx_undersize); 2387 ICE_PF_STAT32(GLPRT_RFC, rx_fragments); 2388 ICE_PF_STAT32(GLPRT_ROC, rx_oversize); 2389 ICE_PF_STAT32(GLPRT_RJC, rx_jabber); 2390 2391 #undef ICE_PF_STAT40 2392 #undef ICE_PF_STAT32 2393 2394 sc->stats.offsets_loaded = true; 2395 } 2396 2397 /** 2398 * ice_reset_pf_stats - Reset port stats counters 2399 * @sc: Device private softc structure 2400 * 2401 * Reset software tracking values for statistics to zero, and indicate that 2402 * offsets haven't been loaded. Intended to be called after a device reset so 2403 * that statistics count from zero again. 
2404 */ 2405 void 2406 ice_reset_pf_stats(struct ice_softc *sc) 2407 { 2408 memset(&sc->stats.prev, 0, sizeof(sc->stats.prev)); 2409 memset(&sc->stats.cur, 0, sizeof(sc->stats.cur)); 2410 sc->stats.offsets_loaded = false; 2411 } 2412 2413 /** 2414 * ice_sysctl_show_fw - sysctl callback to show firmware information 2415 * @oidp: sysctl oid structure 2416 * @arg1: pointer to private data structure 2417 * @arg2: unused 2418 * @req: sysctl request pointer 2419 * 2420 * Callback for the fw_version sysctl, to display the current firmware 2421 * information found at hardware init time. 2422 */ 2423 static int 2424 ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2425 { 2426 struct ice_softc *sc = (struct ice_softc *)arg1; 2427 struct ice_hw *hw = &sc->hw; 2428 struct sbuf *sbuf; 2429 2430 UNREFERENCED_PARAMETER(oidp); 2431 UNREFERENCED_PARAMETER(arg2); 2432 2433 if (ice_driver_is_detaching(sc)) 2434 return (ESHUTDOWN); 2435 2436 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2437 ice_nvm_version_str(hw, sbuf); 2438 sbuf_finish(sbuf); 2439 sbuf_delete(sbuf); 2440 2441 return (0); 2442 } 2443 2444 /** 2445 * ice_sysctl_pba_number - sysctl callback to show PBA number 2446 * @oidp: sysctl oid structure 2447 * @arg1: pointer to private data structure 2448 * @arg2: unused 2449 * @req: sysctl request pointer 2450 * 2451 * Callback for the pba_number sysctl, used to read the Product Board Assembly 2452 * number for this device. 2453 */ 2454 static int 2455 ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS) 2456 { 2457 struct ice_softc *sc = (struct ice_softc *)arg1; 2458 struct ice_hw *hw = &sc->hw; 2459 device_t dev = sc->dev; 2460 u8 pba_string[32] = ""; 2461 enum ice_status status; 2462 2463 UNREFERENCED_PARAMETER(arg2); 2464 2465 if (ice_driver_is_detaching(sc)) 2466 return (ESHUTDOWN); 2467 2468 status = ice_read_pba_string(hw, pba_string, sizeof(pba_string)); 2469 if (status) { 2470 device_printf(dev, 2471 "%s: failed to read PBA string from NVM; status %s, aq_err %s\n", 2472 __func__, ice_status_str(status), 2473 ice_aq_str(hw->adminq.sq_last_status)); 2474 return (EIO); 2475 } 2476 2477 return sysctl_handle_string(oidp, pba_string, sizeof(pba_string), req); 2478 } 2479 2480 /** 2481 * ice_sysctl_pkg_version - sysctl to show the active package version info 2482 * @oidp: sysctl oid structure 2483 * @arg1: pointer to private data structure 2484 * @arg2: unused 2485 * @req: sysctl request pointer 2486 * 2487 * Callback for the pkg_version sysctl, to display the active DDP package name 2488 * and version information. 2489 */ 2490 static int 2491 ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS) 2492 { 2493 struct ice_softc *sc = (struct ice_softc *)arg1; 2494 struct ice_hw *hw = &sc->hw; 2495 struct sbuf *sbuf; 2496 2497 UNREFERENCED_PARAMETER(oidp); 2498 UNREFERENCED_PARAMETER(arg2); 2499 2500 if (ice_driver_is_detaching(sc)) 2501 return (ESHUTDOWN); 2502 2503 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2504 ice_active_pkg_version_str(hw, sbuf); 2505 sbuf_finish(sbuf); 2506 sbuf_delete(sbuf); 2507 2508 return (0); 2509 } 2510 2511 /** 2512 * ice_sysctl_os_pkg_version - sysctl to show the OS package version info 2513 * @oidp: sysctl oid structure 2514 * @arg1: pointer to private data structure 2515 * @arg2: unused 2516 * @req: sysctl request pointer 2517 * 2518 * Callback for the pkg_version sysctl, to display the OS DDP package name and 2519 * version info found in the ice_ddp module. 
2520 */ 2521 static int 2522 ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS) 2523 { 2524 struct ice_softc *sc = (struct ice_softc *)arg1; 2525 struct ice_hw *hw = &sc->hw; 2526 struct sbuf *sbuf; 2527 2528 UNREFERENCED_PARAMETER(oidp); 2529 UNREFERENCED_PARAMETER(arg2); 2530 2531 if (ice_driver_is_detaching(sc)) 2532 return (ESHUTDOWN); 2533 2534 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2535 ice_os_pkg_version_str(hw, sbuf); 2536 sbuf_finish(sbuf); 2537 sbuf_delete(sbuf); 2538 2539 return (0); 2540 } 2541 2542 /** 2543 * ice_sysctl_current_speed - sysctl callback to show current link speed 2544 * @oidp: sysctl oid structure 2545 * @arg1: pointer to private data structure 2546 * @arg2: unused 2547 * @req: sysctl request pointer 2548 * 2549 * Callback for the current_speed sysctl, to display the string representing 2550 * the current link speed. 2551 */ 2552 static int 2553 ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2554 { 2555 struct ice_softc *sc = (struct ice_softc *)arg1; 2556 struct ice_hw *hw = &sc->hw; 2557 struct sbuf *sbuf; 2558 2559 UNREFERENCED_PARAMETER(oidp); 2560 UNREFERENCED_PARAMETER(arg2); 2561 2562 if (ice_driver_is_detaching(sc)) 2563 return (ESHUTDOWN); 2564 2565 sbuf = sbuf_new_for_sysctl(NULL, NULL, 10, req); 2566 sbuf_printf(sbuf, "%s", ice_aq_speed_to_str(hw->port_info)); 2567 sbuf_finish(sbuf); 2568 sbuf_delete(sbuf); 2569 2570 return (0); 2571 } 2572 2573 /** 2574 * @var phy_link_speeds 2575 * @brief PHY link speed conversion array 2576 * 2577 * Array of link speeds to convert ICE_PHY_TYPE_LOW and ICE_PHY_TYPE_HIGH into 2578 * link speeds used by the link speed sysctls. 2579 * 2580 * @remark these are based on the indices used in the BIT() macros for the 2581 * ICE_PHY_TYPE_LOW_* and ICE_PHY_TYPE_HIGH_* definitions. 
2582 */ 2583 static const uint16_t phy_link_speeds[] = { 2584 ICE_AQ_LINK_SPEED_100MB, 2585 ICE_AQ_LINK_SPEED_100MB, 2586 ICE_AQ_LINK_SPEED_1000MB, 2587 ICE_AQ_LINK_SPEED_1000MB, 2588 ICE_AQ_LINK_SPEED_1000MB, 2589 ICE_AQ_LINK_SPEED_1000MB, 2590 ICE_AQ_LINK_SPEED_1000MB, 2591 ICE_AQ_LINK_SPEED_2500MB, 2592 ICE_AQ_LINK_SPEED_2500MB, 2593 ICE_AQ_LINK_SPEED_2500MB, 2594 ICE_AQ_LINK_SPEED_5GB, 2595 ICE_AQ_LINK_SPEED_5GB, 2596 ICE_AQ_LINK_SPEED_10GB, 2597 ICE_AQ_LINK_SPEED_10GB, 2598 ICE_AQ_LINK_SPEED_10GB, 2599 ICE_AQ_LINK_SPEED_10GB, 2600 ICE_AQ_LINK_SPEED_10GB, 2601 ICE_AQ_LINK_SPEED_10GB, 2602 ICE_AQ_LINK_SPEED_10GB, 2603 ICE_AQ_LINK_SPEED_25GB, 2604 ICE_AQ_LINK_SPEED_25GB, 2605 ICE_AQ_LINK_SPEED_25GB, 2606 ICE_AQ_LINK_SPEED_25GB, 2607 ICE_AQ_LINK_SPEED_25GB, 2608 ICE_AQ_LINK_SPEED_25GB, 2609 ICE_AQ_LINK_SPEED_25GB, 2610 ICE_AQ_LINK_SPEED_25GB, 2611 ICE_AQ_LINK_SPEED_25GB, 2612 ICE_AQ_LINK_SPEED_25GB, 2613 ICE_AQ_LINK_SPEED_25GB, 2614 ICE_AQ_LINK_SPEED_40GB, 2615 ICE_AQ_LINK_SPEED_40GB, 2616 ICE_AQ_LINK_SPEED_40GB, 2617 ICE_AQ_LINK_SPEED_40GB, 2618 ICE_AQ_LINK_SPEED_40GB, 2619 ICE_AQ_LINK_SPEED_40GB, 2620 ICE_AQ_LINK_SPEED_50GB, 2621 ICE_AQ_LINK_SPEED_50GB, 2622 ICE_AQ_LINK_SPEED_50GB, 2623 ICE_AQ_LINK_SPEED_50GB, 2624 ICE_AQ_LINK_SPEED_50GB, 2625 ICE_AQ_LINK_SPEED_50GB, 2626 ICE_AQ_LINK_SPEED_50GB, 2627 ICE_AQ_LINK_SPEED_50GB, 2628 ICE_AQ_LINK_SPEED_50GB, 2629 ICE_AQ_LINK_SPEED_50GB, 2630 ICE_AQ_LINK_SPEED_50GB, 2631 ICE_AQ_LINK_SPEED_50GB, 2632 ICE_AQ_LINK_SPEED_50GB, 2633 ICE_AQ_LINK_SPEED_50GB, 2634 ICE_AQ_LINK_SPEED_50GB, 2635 ICE_AQ_LINK_SPEED_100GB, 2636 ICE_AQ_LINK_SPEED_100GB, 2637 ICE_AQ_LINK_SPEED_100GB, 2638 ICE_AQ_LINK_SPEED_100GB, 2639 ICE_AQ_LINK_SPEED_100GB, 2640 ICE_AQ_LINK_SPEED_100GB, 2641 ICE_AQ_LINK_SPEED_100GB, 2642 ICE_AQ_LINK_SPEED_100GB, 2643 ICE_AQ_LINK_SPEED_100GB, 2644 ICE_AQ_LINK_SPEED_100GB, 2645 ICE_AQ_LINK_SPEED_100GB, 2646 ICE_AQ_LINK_SPEED_100GB, 2647 ICE_AQ_LINK_SPEED_100GB, 2648 /* These rates are for ICE_PHY_TYPE_HIGH_* */ 2649 ICE_AQ_LINK_SPEED_100GB, 2650 ICE_AQ_LINK_SPEED_100GB, 2651 ICE_AQ_LINK_SPEED_100GB, 2652 ICE_AQ_LINK_SPEED_100GB, 2653 ICE_AQ_LINK_SPEED_100GB 2654 }; 2655 2656 #define ICE_SYSCTL_HELP_ADVERTISE_SPEED \ 2657 "\nControl advertised link speed." \ 2658 "\nFlags:" \ 2659 "\n\t 0x0 - Auto" \ 2660 "\n\t 0x1 - 10 Mb" \ 2661 "\n\t 0x2 - 100 Mb" \ 2662 "\n\t 0x4 - 1G" \ 2663 "\n\t 0x8 - 2.5G" \ 2664 "\n\t 0x10 - 5G" \ 2665 "\n\t 0x20 - 10G" \ 2666 "\n\t 0x40 - 20G" \ 2667 "\n\t 0x80 - 25G" \ 2668 "\n\t 0x100 - 40G" \ 2669 "\n\t 0x200 - 50G" \ 2670 "\n\t 0x400 - 100G" \ 2671 "\n\t0x8000 - Unknown" \ 2672 "\n\t" \ 2673 "\nUse \"sysctl -x\" to view flags properly." 
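/*
 * Illustrative note (not part of the original source): the advertise_speed
 * flags listed above may be OR'd together. For example, advertising only 10G
 * and 25G corresponds to 0x20 | 0x80 = 0xa0. A hedged usage sketch, assuming
 * the sysctl node is named dev.ice.<unit>.advertise_speed:
 *
 *   # sysctl -x dev.ice.0.advertise_speed=0xa0
 */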
2674 2675 #define ICE_PHYS_100MB \ 2676 (ICE_PHY_TYPE_LOW_100BASE_TX | \ 2677 ICE_PHY_TYPE_LOW_100M_SGMII) 2678 #define ICE_PHYS_1000MB \ 2679 (ICE_PHY_TYPE_LOW_1000BASE_T | \ 2680 ICE_PHY_TYPE_LOW_1000BASE_SX | \ 2681 ICE_PHY_TYPE_LOW_1000BASE_LX | \ 2682 ICE_PHY_TYPE_LOW_1000BASE_KX | \ 2683 ICE_PHY_TYPE_LOW_1G_SGMII) 2684 #define ICE_PHYS_2500MB \ 2685 (ICE_PHY_TYPE_LOW_2500BASE_T | \ 2686 ICE_PHY_TYPE_LOW_2500BASE_X | \ 2687 ICE_PHY_TYPE_LOW_2500BASE_KX) 2688 #define ICE_PHYS_5GB \ 2689 (ICE_PHY_TYPE_LOW_5GBASE_T | \ 2690 ICE_PHY_TYPE_LOW_5GBASE_KR) 2691 #define ICE_PHYS_10GB \ 2692 (ICE_PHY_TYPE_LOW_10GBASE_T | \ 2693 ICE_PHY_TYPE_LOW_10G_SFI_DA | \ 2694 ICE_PHY_TYPE_LOW_10GBASE_SR | \ 2695 ICE_PHY_TYPE_LOW_10GBASE_LR | \ 2696 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \ 2697 ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \ 2698 ICE_PHY_TYPE_LOW_10G_SFI_C2C) 2699 #define ICE_PHYS_25GB \ 2700 (ICE_PHY_TYPE_LOW_25GBASE_T | \ 2701 ICE_PHY_TYPE_LOW_25GBASE_CR | \ 2702 ICE_PHY_TYPE_LOW_25GBASE_CR_S | \ 2703 ICE_PHY_TYPE_LOW_25GBASE_CR1 | \ 2704 ICE_PHY_TYPE_LOW_25GBASE_SR | \ 2705 ICE_PHY_TYPE_LOW_25GBASE_LR | \ 2706 ICE_PHY_TYPE_LOW_25GBASE_KR | \ 2707 ICE_PHY_TYPE_LOW_25GBASE_KR_S | \ 2708 ICE_PHY_TYPE_LOW_25GBASE_KR1 | \ 2709 ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | \ 2710 ICE_PHY_TYPE_LOW_25G_AUI_C2C) 2711 #define ICE_PHYS_40GB \ 2712 (ICE_PHY_TYPE_LOW_40GBASE_CR4 | \ 2713 ICE_PHY_TYPE_LOW_40GBASE_SR4 | \ 2714 ICE_PHY_TYPE_LOW_40GBASE_LR4 | \ 2715 ICE_PHY_TYPE_LOW_40GBASE_KR4 | \ 2716 ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \ 2717 ICE_PHY_TYPE_LOW_40G_XLAUI) 2718 #define ICE_PHYS_50GB \ 2719 (ICE_PHY_TYPE_LOW_50GBASE_CR2 | \ 2720 ICE_PHY_TYPE_LOW_50GBASE_SR2 | \ 2721 ICE_PHY_TYPE_LOW_50GBASE_LR2 | \ 2722 ICE_PHY_TYPE_LOW_50GBASE_KR2 | \ 2723 ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \ 2724 ICE_PHY_TYPE_LOW_50G_LAUI2 | \ 2725 ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \ 2726 ICE_PHY_TYPE_LOW_50G_AUI2 | \ 2727 ICE_PHY_TYPE_LOW_50GBASE_CP | \ 2728 ICE_PHY_TYPE_LOW_50GBASE_SR | \ 2729 ICE_PHY_TYPE_LOW_50GBASE_FR | \ 2730 ICE_PHY_TYPE_LOW_50GBASE_LR | \ 2731 ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 | \ 2732 ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \ 2733 ICE_PHY_TYPE_LOW_50G_AUI1) 2734 #define ICE_PHYS_100GB_LOW \ 2735 (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \ 2736 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \ 2737 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \ 2738 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \ 2739 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \ 2740 ICE_PHY_TYPE_LOW_100G_CAUI4 | \ 2741 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \ 2742 ICE_PHY_TYPE_LOW_100G_AUI4 | \ 2743 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \ 2744 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \ 2745 ICE_PHY_TYPE_LOW_100GBASE_CP2 | \ 2746 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \ 2747 ICE_PHY_TYPE_LOW_100GBASE_DR) 2748 #define ICE_PHYS_100GB_HIGH \ 2749 (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \ 2750 ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \ 2751 ICE_PHY_TYPE_HIGH_100G_CAUI2 | \ 2752 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ 2753 ICE_PHY_TYPE_HIGH_100G_AUI2) 2754 2755 /** 2756 * ice_aq_phy_types_to_sysctl_speeds - Convert the PHY Types to speeds 2757 * @phy_type_low: lower 64-bit PHY Type bitmask 2758 * @phy_type_high: upper 64-bit PHY Type bitmask 2759 * 2760 * Convert the PHY Type fields from Get PHY Abilities and Set PHY Config into 2761 * link speed flags. If phy_type_high has an unknown PHY type, then the return 2762 * value will include the "ICE_AQ_LINK_SPEED_UNKNOWN" flag as well. 
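 *
 * For example (illustrative note), a phy_type_low value with only the
 * ICE_PHY_TYPE_LOW_25GBASE_SR bit set maps through phy_link_speeds[] to
 * ICE_AQ_LINK_SPEED_25GB, following the conversion table above.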
2763 */ 2764 static u16 2765 ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, u64 phy_type_high) 2766 { 2767 u16 sysctl_speeds = 0; 2768 int bit; 2769 2770 /* coverity[address_of] */ 2771 for_each_set_bit(bit, &phy_type_low, 64) 2772 sysctl_speeds |= phy_link_speeds[bit]; 2773 2774 /* coverity[address_of] */ 2775 for_each_set_bit(bit, &phy_type_high, 64) { 2776 if ((bit + 64) < (int)ARRAY_SIZE(phy_link_speeds)) 2777 sysctl_speeds |= phy_link_speeds[bit + 64]; 2778 else 2779 sysctl_speeds |= ICE_AQ_LINK_SPEED_UNKNOWN; 2780 } 2781 2782 return (sysctl_speeds); 2783 } 2784 2785 /** 2786 * ice_sysctl_speeds_to_aq_phy_types - Convert sysctl speed flags to AQ PHY flags 2787 * @sysctl_speeds: 16-bit sysctl speeds or AQ_LINK_SPEED flags 2788 * @phy_type_low: output parameter for lower AQ PHY flags 2789 * @phy_type_high: output parameter for higher AQ PHY flags 2790 * 2791 * Converts the given link speed flags into AQ PHY type flag sets appropriate 2792 * for use in a Set PHY Config command. 2793 */ 2794 static void 2795 ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, 2796 u64 *phy_type_high) 2797 { 2798 *phy_type_low = 0, *phy_type_high = 0; 2799 2800 if (sysctl_speeds & ICE_AQ_LINK_SPEED_100MB) 2801 *phy_type_low |= ICE_PHYS_100MB; 2802 if (sysctl_speeds & ICE_AQ_LINK_SPEED_1000MB) 2803 *phy_type_low |= ICE_PHYS_1000MB; 2804 if (sysctl_speeds & ICE_AQ_LINK_SPEED_2500MB) 2805 *phy_type_low |= ICE_PHYS_2500MB; 2806 if (sysctl_speeds & ICE_AQ_LINK_SPEED_5GB) 2807 *phy_type_low |= ICE_PHYS_5GB; 2808 if (sysctl_speeds & ICE_AQ_LINK_SPEED_10GB) 2809 *phy_type_low |= ICE_PHYS_10GB; 2810 if (sysctl_speeds & ICE_AQ_LINK_SPEED_25GB) 2811 *phy_type_low |= ICE_PHYS_25GB; 2812 if (sysctl_speeds & ICE_AQ_LINK_SPEED_40GB) 2813 *phy_type_low |= ICE_PHYS_40GB; 2814 if (sysctl_speeds & ICE_AQ_LINK_SPEED_50GB) 2815 *phy_type_low |= ICE_PHYS_50GB; 2816 if (sysctl_speeds & ICE_AQ_LINK_SPEED_100GB) { 2817 *phy_type_low |= ICE_PHYS_100GB_LOW; 2818 *phy_type_high |= ICE_PHYS_100GB_HIGH; 2819 } 2820 } 2821 2822 /** 2823 * ice_intersect_media_types_with_caps - Restrict input AQ PHY flags 2824 * @sc: driver private structure 2825 * @sysctl_speeds: current SW configuration of PHY types 2826 * @phy_type_low: input/output flag set for low PHY types 2827 * @phy_type_high: input/output flag set for high PHY types 2828 * 2829 * Intersects the input PHY flags with PHY flags retrieved from the adapter to 2830 * ensure the flags are compatible. 
2831 * 2832 * @returns 0 on success, EIO if an AQ command fails, or EINVAL if input PHY 2833 * types have no intersection with TOPO_CAPS and the adapter is in non-lenient 2834 * mode 2835 */ 2836 static int 2837 ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds, 2838 u64 *phy_type_low, u64 *phy_type_high) 2839 { 2840 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 2841 struct ice_port_info *pi = sc->hw.port_info; 2842 device_t dev = sc->dev; 2843 enum ice_status status; 2844 u64 temp_phy_low, temp_phy_high; 2845 u64 final_phy_low, final_phy_high; 2846 u16 topo_speeds; 2847 2848 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 2849 &pcaps, NULL); 2850 if (status != ICE_SUCCESS) { 2851 device_printf(dev, 2852 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 2853 __func__, ice_status_str(status), 2854 ice_aq_str(sc->hw.adminq.sq_last_status)); 2855 return (EIO); 2856 } 2857 2858 final_phy_low = le64toh(pcaps.phy_type_low); 2859 final_phy_high = le64toh(pcaps.phy_type_high); 2860 2861 topo_speeds = ice_aq_phy_types_to_sysctl_speeds(final_phy_low, 2862 final_phy_high); 2863 2864 /* 2865 * If the user specifies a subset of speeds the media is already 2866 * capable of supporting, then we're good to go. 2867 */ 2868 if ((sysctl_speeds & topo_speeds) == sysctl_speeds) 2869 goto intersect_final; 2870 2871 temp_phy_low = final_phy_low; 2872 temp_phy_high = final_phy_high; 2873 /* 2874 * Otherwise, we'll have to use the superset if Lenient Mode is 2875 * supported. 2876 */ 2877 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { 2878 /* 2879 * Start with masks that _don't_ include the PHY types 2880 * discovered by the TOPO_CAP. 2881 */ 2882 ice_sysctl_speeds_to_aq_phy_types(topo_speeds, &final_phy_low, 2883 &final_phy_high); 2884 final_phy_low = ~final_phy_low; 2885 final_phy_high = ~final_phy_high; 2886 2887 /* Get the PHY types the NVM says we can support */ 2888 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, 2889 &pcaps, NULL); 2890 if (status != ICE_SUCCESS) { 2891 device_printf(dev, 2892 "%s: ice_aq_get_phy_caps (NVM_CAP) failed; status %s, aq_err %s\n", 2893 __func__, ice_status_str(status), 2894 ice_aq_str(sc->hw.adminq.sq_last_status)); 2895 return (status); 2896 } 2897 2898 /* 2899 * Clear out the unsupported PHY types, including those 2900 * from TOPO_CAP. 2901 */ 2902 final_phy_low &= le64toh(pcaps.phy_type_low); 2903 final_phy_high &= le64toh(pcaps.phy_type_high); 2904 /* 2905 * Include PHY types from TOPO_CAP (which may be a subset 2906 * of the types the NVM specifies). 2907 */ 2908 final_phy_low |= temp_phy_low; 2909 final_phy_high |= temp_phy_high; 2910 } 2911 2912 intersect_final: 2913 2914 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) 2915 ice_apply_supported_speed_filter(&final_phy_low, &final_phy_high); 2916 2917 ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &temp_phy_low, 2918 &temp_phy_high); 2919 2920 final_phy_low &= temp_phy_low; 2921 final_phy_high &= temp_phy_high; 2922 2923 if (final_phy_low == 0 && final_phy_high == 0) { 2924 device_printf(dev, 2925 "The selected speed is not supported by the current media. 
Please select a link speed that is supported by the current media.\n"); 2926 return (EINVAL); 2927 } 2928 2929 /* Overwrite input phy_type values and return */ 2930 *phy_type_low = final_phy_low; 2931 *phy_type_high = final_phy_high; 2932 2933 return (0); 2934 } 2935 2936 /** 2937 * ice_get_auto_speeds - Get PHY type flags for "auto" speed 2938 * @sc: driver private structure 2939 * @phy_type_low: output low PHY type flags 2940 * @phy_type_high: output high PHY type flags 2941 * 2942 * Retrieves a suitable set of PHY type flags to use for an "auto" speed 2943 * setting by either using the NVM default overrides for speed, or retrieving 2944 * a default from the adapter using Get PHY capabilities in TOPO_CAPS mode. 2945 * 2946 * @returns 0 on success or EIO on AQ command failure 2947 */ 2948 static int 2949 ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low, 2950 u64 *phy_type_high) 2951 { 2952 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 2953 struct ice_hw *hw = &sc->hw; 2954 struct ice_port_info *pi = hw->port_info; 2955 device_t dev = sc->dev; 2956 enum ice_status status; 2957 2958 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE)) { 2959 /* copy over speed settings from LDO TLV */ 2960 *phy_type_low = CPU_TO_LE64(sc->ldo_tlv.phy_type_low); 2961 *phy_type_high = CPU_TO_LE64(sc->ldo_tlv.phy_type_high); 2962 } else { 2963 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 2964 &pcaps, NULL); 2965 if (status != ICE_SUCCESS) { 2966 device_printf(dev, 2967 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 2968 __func__, ice_status_str(status), 2969 ice_aq_str(hw->adminq.sq_last_status)); 2970 return (EIO); 2971 } 2972 2973 *phy_type_low = le64toh(pcaps.phy_type_low); 2974 *phy_type_high = le64toh(pcaps.phy_type_high); 2975 } 2976 2977 return (0); 2978 } 2979 2980 /** 2981 * ice_sysctl_advertise_speed - Display/change link speeds supported by port 2982 * @oidp: sysctl oid structure 2983 * @arg1: pointer to private data structure 2984 * @arg2: unused 2985 * @req: sysctl request pointer 2986 * 2987 * On read: Displays the currently supported speeds 2988 * On write: Sets the device's supported speeds 2989 * Valid input flags: see ICE_SYSCTL_HELP_ADVERTISE_SPEED 2990 */ 2991 static int 2992 ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS) 2993 { 2994 struct ice_softc *sc = (struct ice_softc *)arg1; 2995 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 2996 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 2997 struct ice_hw *hw = &sc->hw; 2998 struct ice_port_info *pi = hw->port_info; 2999 device_t dev = sc->dev; 3000 enum ice_status status; 3001 u64 phy_low, phy_high; 3002 u16 sysctl_speeds = 0; 3003 int error = 0; 3004 3005 UNREFERENCED_PARAMETER(arg2); 3006 3007 if (ice_driver_is_detaching(sc)) 3008 return (ESHUTDOWN); 3009 3010 /* Get the current speeds from the adapter's "active" configuration. 
*/ 3011 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 3012 &pcaps, NULL); 3013 if (status != ICE_SUCCESS) { 3014 device_printf(dev, 3015 "%s: ice_aq_get_phy_caps (SW_CFG) failed; status %s, aq_err %s\n", 3016 __func__, ice_status_str(status), 3017 ice_aq_str(hw->adminq.sq_last_status)); 3018 return (EIO); 3019 } 3020 3021 phy_low = le64toh(pcaps.phy_type_low); 3022 phy_high = le64toh(pcaps.phy_type_high); 3023 sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); 3024 3025 error = sysctl_handle_16(oidp, &sysctl_speeds, 0, req); 3026 if ((error) || (req->newptr == NULL)) 3027 return (error); 3028 3029 if (sysctl_speeds > 0x7FF) { 3030 device_printf(dev, 3031 "%s: \"%u\" is outside of the range of acceptable values.\n", 3032 __func__, sysctl_speeds); 3033 return (EINVAL); 3034 } 3035 3036 /* 0 is treated as "Auto"; the driver will handle selecting the correct speeds, 3037 * or apply an override if one is specified in the NVM. 3038 */ 3039 if (sysctl_speeds == 0) { 3040 error = ice_get_auto_speeds(sc, &phy_low, &phy_high); 3041 if (error) 3042 /* Function already prints appropriate error message */ 3043 return (error); 3044 } else { 3045 error = ice_intersect_media_types_with_caps(sc, sysctl_speeds, 3046 &phy_low, &phy_high); 3047 if (error) 3048 /* Function already prints appropriate error message */ 3049 return (error); 3050 } 3051 sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); 3052 3053 /* Cache new user setting for speeds */ 3054 pi->phy.curr_user_speed_req = sysctl_speeds; 3055 3056 /* Setup new PHY config with new input PHY types */ 3057 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 3058 3059 cfg.phy_type_low = phy_low; 3060 cfg.phy_type_high = phy_high; 3061 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 3062 3063 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3064 if (status != ICE_SUCCESS) { 3065 /* Don't indicate failure if there's no media in the port -- the sysctl 3066 * handler has saved the value and will apply it when media is inserted. 3067 */ 3068 if (status == ICE_ERR_AQ_ERROR && 3069 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 3070 device_printf(dev, 3071 "%s: Setting will be applied when media is inserted\n", __func__); 3072 return (0); 3073 } else { 3074 device_printf(dev, 3075 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 3076 __func__, ice_status_str(status), 3077 ice_aq_str(hw->adminq.sq_last_status)); 3078 return (EIO); 3079 } 3080 } 3081 3082 return (0); 3083 } 3084 3085 #define ICE_SYSCTL_HELP_FEC_CONFIG \ 3086 "\nDisplay or set the port's requested FEC mode." \ 3087 "\n\tauto - " ICE_FEC_STRING_AUTO \ 3088 "\n\tfc - " ICE_FEC_STRING_BASER \ 3089 "\n\trs - " ICE_FEC_STRING_RS \ 3090 "\n\tnone - " ICE_FEC_STRING_NONE \ 3091 "\nEither of the left or right strings above can be used to set the requested mode." 3092 3093 /** 3094 * ice_sysctl_fec_config - Display/change the configured FEC mode 3095 * @oidp: sysctl oid structure 3096 * @arg1: pointer to private data structure 3097 * @arg2: unused 3098 * @req: sysctl request pointer 3099 * 3100 * On read: Displays the configured FEC mode 3101 * On write: Sets the device's FEC mode to the input string, if it's valid. 
3102 * Valid input strings: see ICE_SYSCTL_HELP_FEC_CONFIG 3103 */ 3104 static int 3105 ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS) 3106 { 3107 struct ice_softc *sc = (struct ice_softc *)arg1; 3108 struct ice_port_info *pi = sc->hw.port_info; 3109 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3110 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3111 struct ice_hw *hw = &sc->hw; 3112 enum ice_fec_mode new_mode; 3113 enum ice_status status; 3114 device_t dev = sc->dev; 3115 char req_fec[32]; 3116 int error = 0; 3117 3118 UNREFERENCED_PARAMETER(arg2); 3119 3120 if (ice_driver_is_detaching(sc)) 3121 return (ESHUTDOWN); 3122 3123 bzero(req_fec, sizeof(req_fec)); 3124 strlcpy(req_fec, ice_requested_fec_mode(pi), sizeof(req_fec)); 3125 3126 error = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req); 3127 if ((error) || (req->newptr == NULL)) 3128 return (error); 3129 3130 if (strcmp(req_fec, "auto") == 0 || 3131 strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) { 3132 new_mode = ICE_FEC_AUTO; 3133 } else if (strcmp(req_fec, "fc") == 0 || 3134 strcmp(req_fec, ice_fec_str(ICE_FEC_BASER)) == 0) { 3135 new_mode = ICE_FEC_BASER; 3136 } else if (strcmp(req_fec, "rs") == 0 || 3137 strcmp(req_fec, ice_fec_str(ICE_FEC_RS)) == 0) { 3138 new_mode = ICE_FEC_RS; 3139 } else if (strcmp(req_fec, "none") == 0 || 3140 strcmp(req_fec, ice_fec_str(ICE_FEC_NONE)) == 0) { 3141 new_mode = ICE_FEC_NONE; 3142 } else { 3143 device_printf(dev, 3144 "%s: \"%s\" is not a valid FEC mode\n", 3145 __func__, req_fec); 3146 return (EINVAL); 3147 } 3148 3149 /* Cache user FEC mode for later link ups */ 3150 pi->phy.curr_user_fec_req = new_mode; 3151 3152 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 3153 &pcaps, NULL); 3154 if (status != ICE_SUCCESS) { 3155 device_printf(dev, 3156 "%s: ice_aq_get_phy_caps failed (SW_CFG); status %s, aq_err %s\n", 3157 __func__, ice_status_str(status), 3158 ice_aq_str(hw->adminq.sq_last_status)); 3159 return (EIO); 3160 } 3161 3162 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 3163 3164 /* Get link_fec_opt/AUTO_FEC mode from TOPO caps for base for new FEC mode */ 3165 memset(&pcaps, 0, sizeof(pcaps)); 3166 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 3167 &pcaps, NULL); 3168 if (status != ICE_SUCCESS) { 3169 device_printf(dev, 3170 "%s: ice_aq_get_phy_caps failed (TOPO_CAP); status %s, aq_err %s\n", 3171 __func__, ice_status_str(status), 3172 ice_aq_str(hw->adminq.sq_last_status)); 3173 return (EIO); 3174 } 3175 3176 /* Configure new FEC options using TOPO caps */ 3177 cfg.link_fec_opt = pcaps.link_fec_options; 3178 cfg.caps &= ~ICE_AQ_PHY_ENA_AUTO_FEC; 3179 if (pcaps.caps & ICE_AQC_PHY_EN_AUTO_FEC) 3180 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_FEC; 3181 3182 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE) && 3183 new_mode == ICE_FEC_AUTO) { 3184 /* copy over FEC settings from LDO TLV */ 3185 cfg.link_fec_opt = sc->ldo_tlv.fec_options; 3186 } else { 3187 ice_cfg_phy_fec(pi, &cfg, new_mode); 3188 3189 /* Check if the new mode is valid, and exit with an error if not */ 3190 if (cfg.link_fec_opt && 3191 !(cfg.link_fec_opt & pcaps.link_fec_options)) { 3192 device_printf(dev, 3193 "%s: The requested FEC mode, %s, is not supported by current media\n", 3194 __func__, ice_fec_str(new_mode)); 3195 return (ENOTSUP); 3196 } 3197 } 3198 3199 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3200 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3201 if (status != ICE_SUCCESS) { 3202 /* Don't indicate failure if there's no media in the port -- the sysctl 3203 * handler has 
saved the value and will apply it when media is inserted. 3204 */ 3205 if (status == ICE_ERR_AQ_ERROR && 3206 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 3207 device_printf(dev, 3208 "%s: Setting will be applied when media is inserted\n", __func__); 3209 return (0); 3210 } else { 3211 device_printf(dev, 3212 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 3213 __func__, ice_status_str(status), 3214 ice_aq_str(hw->adminq.sq_last_status)); 3215 return (EIO); 3216 } 3217 } 3218 3219 return (0); 3220 } 3221 3222 /** 3223 * ice_sysctl_negotiated_fec - Display the negotiated FEC mode on the link 3224 * @oidp: sysctl oid structure 3225 * @arg1: pointer to private data structure 3226 * @arg2: unused 3227 * @req: sysctl request pointer 3228 * 3229 * On read: Displays the negotiated FEC mode, in a string 3230 */ 3231 static int 3232 ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS) 3233 { 3234 struct ice_softc *sc = (struct ice_softc *)arg1; 3235 struct ice_hw *hw = &sc->hw; 3236 char neg_fec[32]; 3237 int error; 3238 3239 UNREFERENCED_PARAMETER(arg2); 3240 3241 if (ice_driver_is_detaching(sc)) 3242 return (ESHUTDOWN); 3243 3244 /* Copy const string into a buffer to drop const qualifier */ 3245 bzero(neg_fec, sizeof(neg_fec)); 3246 strlcpy(neg_fec, ice_negotiated_fec_mode(hw->port_info), sizeof(neg_fec)); 3247 3248 error = sysctl_handle_string(oidp, neg_fec, 0, req); 3249 if (req->newptr != NULL) 3250 return (EPERM); 3251 3252 return (error); 3253 } 3254 3255 #define ICE_SYSCTL_HELP_FC_CONFIG \ 3256 "\nDisplay or set the port's advertised flow control mode.\n" \ 3257 "\t0 - " ICE_FC_STRING_NONE \ 3258 "\n\t1 - " ICE_FC_STRING_RX \ 3259 "\n\t2 - " ICE_FC_STRING_TX \ 3260 "\n\t3 - " ICE_FC_STRING_FULL \ 3261 "\nEither the numbers or the strings above can be used to set the advertised mode." 3262 3263 /** 3264 * ice_sysctl_fc_config - Display/change the advertised flow control mode 3265 * @oidp: sysctl oid structure 3266 * @arg1: pointer to private data structure 3267 * @arg2: unused 3268 * @req: sysctl request pointer 3269 * 3270 * On read: Displays the configured flow control mode 3271 * On write: Sets the device's flow control mode to the input, if it's valid. 
3272 * Valid input strings: see ICE_SYSCTL_HELP_FC_CONFIG 3273 */ 3274 static int 3275 ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS) 3276 { 3277 struct ice_softc *sc = (struct ice_softc *)arg1; 3278 struct ice_port_info *pi = sc->hw.port_info; 3279 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3280 enum ice_fc_mode old_mode, new_mode; 3281 struct ice_hw *hw = &sc->hw; 3282 device_t dev = sc->dev; 3283 enum ice_status status; 3284 int error = 0, fc_num; 3285 bool mode_set = false; 3286 struct sbuf buf; 3287 char *fc_str_end; 3288 char fc_str[32]; 3289 u8 aq_failures; 3290 3291 UNREFERENCED_PARAMETER(arg2); 3292 3293 if (ice_driver_is_detaching(sc)) 3294 return (ESHUTDOWN); 3295 3296 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 3297 &pcaps, NULL); 3298 if (status != ICE_SUCCESS) { 3299 device_printf(dev, 3300 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3301 __func__, ice_status_str(status), 3302 ice_aq_str(hw->adminq.sq_last_status)); 3303 return (EIO); 3304 } 3305 3306 /* Convert HW response format to SW enum value */ 3307 if ((pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && 3308 (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) 3309 old_mode = ICE_FC_FULL; 3310 else if (pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3311 old_mode = ICE_FC_TX_PAUSE; 3312 else if (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3313 old_mode = ICE_FC_RX_PAUSE; 3314 else 3315 old_mode = ICE_FC_NONE; 3316 3317 /* Create "old" string for output */ 3318 bzero(fc_str, sizeof(fc_str)); 3319 sbuf_new_for_sysctl(&buf, fc_str, sizeof(fc_str), req); 3320 sbuf_printf(&buf, "%d<%s>", old_mode, ice_fc_str(old_mode)); 3321 sbuf_finish(&buf); 3322 sbuf_delete(&buf); 3323 3324 error = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req); 3325 if ((error) || (req->newptr == NULL)) 3326 return (error); 3327 3328 /* Try to parse input as a string, first */ 3329 if (strcasecmp(ice_fc_str(ICE_FC_FULL), fc_str) == 0) { 3330 new_mode = ICE_FC_FULL; 3331 mode_set = true; 3332 } 3333 else if (strcasecmp(ice_fc_str(ICE_FC_TX_PAUSE), fc_str) == 0) { 3334 new_mode = ICE_FC_TX_PAUSE; 3335 mode_set = true; 3336 } 3337 else if (strcasecmp(ice_fc_str(ICE_FC_RX_PAUSE), fc_str) == 0) { 3338 new_mode = ICE_FC_RX_PAUSE; 3339 mode_set = true; 3340 } 3341 else if (strcasecmp(ice_fc_str(ICE_FC_NONE), fc_str) == 0) { 3342 new_mode = ICE_FC_NONE; 3343 mode_set = true; 3344 } 3345 3346 /* 3347 * Then check if it's an integer, for compatibility with the method 3348 * used in older drivers. 3349 */ 3350 if (!mode_set) { 3351 fc_num = strtol(fc_str, &fc_str_end, 0); 3352 if (fc_str_end == fc_str) 3353 fc_num = -1; 3354 switch (fc_num) { 3355 case 3: 3356 new_mode = ICE_FC_FULL; 3357 break; 3358 case 2: 3359 new_mode = ICE_FC_TX_PAUSE; 3360 break; 3361 case 1: 3362 new_mode = ICE_FC_RX_PAUSE; 3363 break; 3364 case 0: 3365 new_mode = ICE_FC_NONE; 3366 break; 3367 default: 3368 device_printf(dev, 3369 "%s: \"%s\" is not a valid flow control mode\n", 3370 __func__, fc_str); 3371 return (EINVAL); 3372 } 3373 } 3374 3375 /* Finally, set the flow control mode in FW */ 3376 hw->port_info->fc.req_mode = new_mode; 3377 status = ice_set_fc(pi, &aq_failures, true); 3378 if (status != ICE_SUCCESS) { 3379 /* Don't indicate failure if there's no media in the port -- the sysctl 3380 * handler has saved the value and will apply it when media is inserted. 
3381 */ 3382 if (aq_failures == ICE_SET_FC_AQ_FAIL_SET && 3383 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 3384 device_printf(dev, 3385 "%s: Setting will be applied when media is inserted\n", __func__); 3386 return (0); 3387 } else { 3388 device_printf(dev, 3389 "%s: ice_set_fc AQ failure = %d\n", __func__, aq_failures); 3390 return (EIO); 3391 } 3392 } 3393 3394 return (0); 3395 } 3396 3397 /** 3398 * ice_sysctl_negotiated_fc - Display currently negotiated FC mode 3399 * @oidp: sysctl oid structure 3400 * @arg1: pointer to private data structure 3401 * @arg2: unused 3402 * @req: sysctl request pointer 3403 * 3404 * On read: Displays the currently negotiated flow control settings. 3405 * 3406 * If link is not established, this will report ICE_FC_NONE, as no flow 3407 * control is negotiated while link is down. 3408 */ 3409 static int 3410 ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS) 3411 { 3412 struct ice_softc *sc = (struct ice_softc *)arg1; 3413 struct ice_port_info *pi = sc->hw.port_info; 3414 const char *negotiated_fc; 3415 3416 UNREFERENCED_PARAMETER(arg2); 3417 3418 if (ice_driver_is_detaching(sc)) 3419 return (ESHUTDOWN); 3420 3421 negotiated_fc = ice_flowcontrol_mode(pi); 3422 3423 return sysctl_handle_string(oidp, __DECONST(char *, negotiated_fc), 0, req); 3424 } 3425 3426 /** 3427 * __ice_sysctl_phy_type_handler - Display/change supported PHY types/speeds 3428 * @oidp: sysctl oid structure 3429 * @arg1: pointer to private data structure 3430 * @arg2: unused 3431 * @req: sysctl request pointer 3432 * @is_phy_type_high: if true, handle the high PHY type instead of the low PHY type 3433 * 3434 * Private handler for phy_type_high and phy_type_low sysctls. 3435 */ 3436 static int 3437 __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high) 3438 { 3439 struct ice_softc *sc = (struct ice_softc *)arg1; 3440 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3441 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3442 struct ice_hw *hw = &sc->hw; 3443 device_t dev = sc->dev; 3444 enum ice_status status; 3445 uint64_t types; 3446 int error = 0; 3447 3448 UNREFERENCED_PARAMETER(arg2); 3449 3450 if (ice_driver_is_detaching(sc)) 3451 return (ESHUTDOWN); 3452 3453 status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_SW_CFG, 3454 &pcaps, NULL); 3455 if (status != ICE_SUCCESS) { 3456 device_printf(dev, 3457 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3458 __func__, ice_status_str(status), 3459 ice_aq_str(hw->adminq.sq_last_status)); 3460 return (EIO); 3461 } 3462 3463 if (is_phy_type_high) 3464 types = pcaps.phy_type_high; 3465 else 3466 types = pcaps.phy_type_low; 3467 3468 error = sysctl_handle_64(oidp, &types, sizeof(types), req); 3469 if ((error) || (req->newptr == NULL)) 3470 return (error); 3471 3472 ice_copy_phy_caps_to_cfg(hw->port_info, &pcaps, &cfg); 3473 3474 if (is_phy_type_high) 3475 cfg.phy_type_high = types & hw->port_info->phy.phy_type_high; 3476 else 3477 cfg.phy_type_low = types & hw->port_info->phy.phy_type_low; 3478 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3479 3480 status = ice_aq_set_phy_cfg(hw, hw->port_info, &cfg, NULL); 3481 if (status != ICE_SUCCESS) { 3482 device_printf(dev, 3483 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 3484 __func__, ice_status_str(status), 3485 ice_aq_str(hw->adminq.sq_last_status)); 3486 return (EIO); 3487 } 3488 3489 return (0); 3490 3491 } 3492 3493 /** 3494 * ice_sysctl_phy_type_low - Display/change supported lower PHY types/speeds 3495 * @oidp: sysctl oid structure 3496 * @arg1: pointer 
to private data structure 3497 * @arg2: unused 3498 * @req: sysctl request pointer 3499 * 3500 * On read: Displays the currently supported lower PHY types 3501 * On write: Sets the device's supported low PHY types 3502 */ 3503 static int 3504 ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS) 3505 { 3506 return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, false); 3507 } 3508 3509 /** 3510 * ice_sysctl_phy_type_high - Display/change supported higher PHY types/speeds 3511 * @oidp: sysctl oid structure 3512 * @arg1: pointer to private data structure 3513 * @arg2: unused 3514 * @req: sysctl request pointer 3515 * 3516 * On read: Displays the currently supported higher PHY types 3517 * On write: Sets the device's supported high PHY types 3518 */ 3519 static int 3520 ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS) 3521 { 3522 return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, true); 3523 } 3524 3525 /** 3526 * ice_sysctl_phy_caps - Display response from Get PHY abililties 3527 * @oidp: sysctl oid structure 3528 * @arg1: pointer to private data structure 3529 * @arg2: unused 3530 * @req: sysctl request pointer 3531 * @report_mode: the mode to report 3532 * 3533 * On read: Display the response from Get PHY abillities with the given report 3534 * mode. 3535 */ 3536 static int 3537 ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode) 3538 { 3539 struct ice_softc *sc = (struct ice_softc *)arg1; 3540 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3541 struct ice_hw *hw = &sc->hw; 3542 struct ice_port_info *pi = hw->port_info; 3543 device_t dev = sc->dev; 3544 enum ice_status status; 3545 int error; 3546 3547 UNREFERENCED_PARAMETER(arg2); 3548 3549 error = priv_check(curthread, PRIV_DRIVER); 3550 if (error) 3551 return (error); 3552 3553 if (ice_driver_is_detaching(sc)) 3554 return (ESHUTDOWN); 3555 3556 status = ice_aq_get_phy_caps(pi, true, report_mode, &pcaps, NULL); 3557 if (status != ICE_SUCCESS) { 3558 device_printf(dev, 3559 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3560 __func__, ice_status_str(status), 3561 ice_aq_str(hw->adminq.sq_last_status)); 3562 return (EIO); 3563 } 3564 3565 error = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req); 3566 if (req->newptr != NULL) 3567 return (EPERM); 3568 3569 return (error); 3570 } 3571 3572 /** 3573 * ice_sysctl_phy_sw_caps - Display response from Get PHY abililties 3574 * @oidp: sysctl oid structure 3575 * @arg1: pointer to private data structure 3576 * @arg2: unused 3577 * @req: sysctl request pointer 3578 * 3579 * On read: Display the response from Get PHY abillities reporting the last 3580 * software configuration. 3581 */ 3582 static int 3583 ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS) 3584 { 3585 return ice_sysctl_phy_caps(oidp, arg1, arg2, req, 3586 ICE_AQC_REPORT_SW_CFG); 3587 } 3588 3589 /** 3590 * ice_sysctl_phy_nvm_caps - Display response from Get PHY abililties 3591 * @oidp: sysctl oid structure 3592 * @arg1: pointer to private data structure 3593 * @arg2: unused 3594 * @req: sysctl request pointer 3595 * 3596 * On read: Display the response from Get PHY abillities reporting the NVM 3597 * configuration. 
3598 */ 3599 static int 3600 ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS) 3601 { 3602 return ice_sysctl_phy_caps(oidp, arg1, arg2, req, 3603 ICE_AQC_REPORT_NVM_CAP); 3604 } 3605 3606 /** 3607 * ice_sysctl_phy_topo_caps - Display response from Get PHY abililties 3608 * @oidp: sysctl oid structure 3609 * @arg1: pointer to private data structure 3610 * @arg2: unused 3611 * @req: sysctl request pointer 3612 * 3613 * On read: Display the response from Get PHY abillities reporting the 3614 * topology configuration. 3615 */ 3616 static int 3617 ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS) 3618 { 3619 return ice_sysctl_phy_caps(oidp, arg1, arg2, req, 3620 ICE_AQC_REPORT_TOPO_CAP); 3621 } 3622 3623 /** 3624 * ice_sysctl_phy_link_status - Display response from Get Link Status 3625 * @oidp: sysctl oid structure 3626 * @arg1: pointer to private data structure 3627 * @arg2: unused 3628 * @req: sysctl request pointer 3629 * 3630 * On read: Display the response from firmware for the Get Link Status 3631 * request. 3632 */ 3633 static int 3634 ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS) 3635 { 3636 struct ice_aqc_get_link_status_data link_data = { 0 }; 3637 struct ice_softc *sc = (struct ice_softc *)arg1; 3638 struct ice_hw *hw = &sc->hw; 3639 struct ice_port_info *pi = hw->port_info; 3640 struct ice_aqc_get_link_status *resp; 3641 struct ice_aq_desc desc; 3642 device_t dev = sc->dev; 3643 enum ice_status status; 3644 int error; 3645 3646 UNREFERENCED_PARAMETER(arg2); 3647 3648 /* 3649 * Ensure that only contexts with driver privilege are allowed to 3650 * access this information 3651 */ 3652 error = priv_check(curthread, PRIV_DRIVER); 3653 if (error) 3654 return (error); 3655 3656 if (ice_driver_is_detaching(sc)) 3657 return (ESHUTDOWN); 3658 3659 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); 3660 resp = &desc.params.get_link_status; 3661 resp->lport_num = pi->lport; 3662 3663 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), NULL); 3664 if (status != ICE_SUCCESS) { 3665 device_printf(dev, 3666 "%s: ice_aq_send_cmd failed; status %s, aq_err %s\n", 3667 __func__, ice_status_str(status), 3668 ice_aq_str(hw->adminq.sq_last_status)); 3669 return (EIO); 3670 } 3671 3672 error = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req); 3673 if (req->newptr != NULL) 3674 return (EPERM); 3675 3676 return (error); 3677 } 3678 3679 /** 3680 * ice_sysctl_fw_cur_lldp_persist_status - Display current FW LLDP status 3681 * @oidp: sysctl oid structure 3682 * @arg1: pointer to private softc structure 3683 * @arg2: unused 3684 * @req: sysctl request pointer 3685 * 3686 * On read: Displays current persistent LLDP status. 
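 *
 * The status is obtained via ice_get_cur_lldp_persist_status() and rendered
 * as a human-readable string by ice_fw_lldp_status().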
3687 */ 3688 static int 3689 ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS) 3690 { 3691 struct ice_softc *sc = (struct ice_softc *)arg1; 3692 struct ice_hw *hw = &sc->hw; 3693 device_t dev = sc->dev; 3694 enum ice_status status; 3695 struct sbuf *sbuf; 3696 u32 lldp_state; 3697 3698 UNREFERENCED_PARAMETER(arg2); 3699 UNREFERENCED_PARAMETER(oidp); 3700 3701 if (ice_driver_is_detaching(sc)) 3702 return (ESHUTDOWN); 3703 3704 status = ice_get_cur_lldp_persist_status(hw, &lldp_state); 3705 if (status) { 3706 device_printf(dev, 3707 "Could not acquire current LLDP persistence status, err %s aq_err %s\n", 3708 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3709 return (EIO); 3710 } 3711 3712 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3713 sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); 3714 sbuf_finish(sbuf); 3715 sbuf_delete(sbuf); 3716 3717 return (0); 3718 } 3719 3720 /** 3721 * ice_sysctl_fw_dflt_lldp_persist_status - Display default FW LLDP status 3722 * @oidp: sysctl oid structure 3723 * @arg1: pointer to private softc structure 3724 * @arg2: unused 3725 * @req: sysctl request pointer 3726 * 3727 * On read: Displays default persistent LLDP status. 3728 */ 3729 static int 3730 ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS) 3731 { 3732 struct ice_softc *sc = (struct ice_softc *)arg1; 3733 struct ice_hw *hw = &sc->hw; 3734 device_t dev = sc->dev; 3735 enum ice_status status; 3736 struct sbuf *sbuf; 3737 u32 lldp_state; 3738 3739 UNREFERENCED_PARAMETER(arg2); 3740 UNREFERENCED_PARAMETER(oidp); 3741 3742 if (ice_driver_is_detaching(sc)) 3743 return (ESHUTDOWN); 3744 3745 status = ice_get_dflt_lldp_persist_status(hw, &lldp_state); 3746 if (status) { 3747 device_printf(dev, 3748 "Could not acquire default LLDP persistence status, err %s aq_err %s\n", 3749 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3750 return (EIO); 3751 } 3752 3753 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3754 sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); 3755 sbuf_finish(sbuf); 3756 sbuf_delete(sbuf); 3757 3758 return (0); 3759 } 3760 3761 #define ICE_SYSCTL_HELP_FW_LLDP_AGENT \ 3762 "\nDisplay or change FW LLDP agent state:" \ 3763 "\n\t0 - disabled" \ 3764 "\n\t1 - enabled" 3765 3766 /** 3767 * ice_sysctl_fw_lldp_agent - Display or change the FW LLDP agent status 3768 * @oidp: sysctl oid structure 3769 * @arg1: pointer to private softc structure 3770 * @arg2: unused 3771 * @req: sysctl request pointer 3772 * 3773 * On read: Displays whether the FW LLDP agent is running 3774 * On write: Persistently enables or disables the FW LLDP agent 3775 */ 3776 static int 3777 ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS) 3778 { 3779 struct ice_softc *sc = (struct ice_softc *)arg1; 3780 struct ice_hw *hw = &sc->hw; 3781 device_t dev = sc->dev; 3782 enum ice_status status; 3783 int error = 0; 3784 u32 old_state; 3785 u8 fw_lldp_enabled; 3786 bool retried_start_lldp = false; 3787 3788 UNREFERENCED_PARAMETER(arg2); 3789 3790 if (ice_driver_is_detaching(sc)) 3791 return (ESHUTDOWN); 3792 3793 status = ice_get_cur_lldp_persist_status(hw, &old_state); 3794 if (status) { 3795 device_printf(dev, 3796 "Could not acquire current LLDP persistence status, err %s aq_err %s\n", 3797 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3798 return (EIO); 3799 } 3800 3801 if (old_state > ICE_LLDP_ADMINSTATUS_ENA_RXTX) { 3802 status = ice_get_dflt_lldp_persist_status(hw, &old_state); 3803 if (status) { 3804 device_printf(dev, 3805 "Could not 
acquire default LLDP persistence status, err %s aq_err %s\n", 3806 ice_status_str(status), 3807 ice_aq_str(hw->adminq.sq_last_status)); 3808 return (EIO); 3809 } 3810 } 3811 if (old_state == 0) 3812 fw_lldp_enabled = false; 3813 else 3814 fw_lldp_enabled = true; 3815 3816 error = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req); 3817 if ((error) || (req->newptr == NULL)) 3818 return (error); 3819 3820 if (old_state == 0 && fw_lldp_enabled == false) 3821 return (0); 3822 3823 if (old_state != 0 && fw_lldp_enabled == true) 3824 return (0); 3825 3826 if (fw_lldp_enabled == false) { 3827 status = ice_aq_stop_lldp(hw, true, true, NULL); 3828 /* EPERM is returned if the LLDP agent is already shutdown */ 3829 if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM) { 3830 device_printf(dev, 3831 "%s: ice_aq_stop_lldp failed; status %s, aq_err %s\n", 3832 __func__, ice_status_str(status), 3833 ice_aq_str(hw->adminq.sq_last_status)); 3834 return (EIO); 3835 } 3836 ice_aq_set_dcb_parameters(hw, true, NULL); 3837 hw->port_info->qos_cfg.is_sw_lldp = true; 3838 ice_add_rx_lldp_filter(sc); 3839 } else { 3840 ice_del_rx_lldp_filter(sc); 3841 retry_start_lldp: 3842 status = ice_aq_start_lldp(hw, true, NULL); 3843 if (status) { 3844 switch (hw->adminq.sq_last_status) { 3845 /* EEXIST is returned if the LLDP agent is already started */ 3846 case ICE_AQ_RC_EEXIST: 3847 break; 3848 case ICE_AQ_RC_EAGAIN: 3849 /* Retry command after a 2 second wait */ 3850 if (retried_start_lldp == false) { 3851 retried_start_lldp = true; 3852 pause("slldp", ICE_START_LLDP_RETRY_WAIT); 3853 goto retry_start_lldp; 3854 } 3855 /* Fallthrough */ 3856 default: 3857 device_printf(dev, 3858 "%s: ice_aq_start_lldp failed; status %s, aq_err %s\n", 3859 __func__, ice_status_str(status), 3860 ice_aq_str(hw->adminq.sq_last_status)); 3861 return (EIO); 3862 } 3863 } 3864 hw->port_info->qos_cfg.is_sw_lldp = false; 3865 } 3866 3867 return (error); 3868 } 3869 3870 /** 3871 * ice_add_device_sysctls - add device specific dynamic sysctls 3872 * @sc: device private structure 3873 * 3874 * Add per-device dynamic sysctls which show device configuration or enable 3875 * configuring device functionality. For tunable values which can be set prior 3876 * to load, see ice_add_device_tunables. 3877 * 3878 * This function depends on the sysctl layout setup by ice_add_device_tunables, 3879 * and likely should be called near the end of the attach process. 
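 *
 * The sysctls are created under the device's own sysctl tree, so (assuming
 * unit 0) they appear as nodes such as dev.ice.0.fw_version, dev.ice.0.fc,
 * or the hardware statistics under dev.ice.0.hw.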
3880 */ 3881 void 3882 ice_add_device_sysctls(struct ice_softc *sc) 3883 { 3884 struct sysctl_oid *hw_node; 3885 device_t dev = sc->dev; 3886 3887 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 3888 struct sysctl_oid_list *ctx_list = 3889 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 3890 3891 SYSCTL_ADD_PROC(ctx, ctx_list, 3892 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, 3893 sc, 0, ice_sysctl_show_fw, "A", "Firmware version"); 3894 3895 SYSCTL_ADD_PROC(ctx, ctx_list, 3896 OID_AUTO, "pba_number", CTLTYPE_STRING | CTLFLAG_RD, 3897 sc, 0, ice_sysctl_pba_number, "A", "Product Board Assembly Number"); 3898 3899 SYSCTL_ADD_PROC(ctx, ctx_list, 3900 OID_AUTO, "ddp_version", CTLTYPE_STRING | CTLFLAG_RD, 3901 sc, 0, ice_sysctl_pkg_version, "A", "Active DDP package name and version"); 3902 3903 SYSCTL_ADD_PROC(ctx, ctx_list, 3904 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, 3905 sc, 0, ice_sysctl_current_speed, "A", "Current Port Link Speed"); 3906 3907 SYSCTL_ADD_PROC(ctx, ctx_list, 3908 OID_AUTO, "requested_fec", CTLTYPE_STRING | CTLFLAG_RW, 3909 sc, 0, ice_sysctl_fec_config, "A", ICE_SYSCTL_HELP_FEC_CONFIG); 3910 3911 SYSCTL_ADD_PROC(ctx, ctx_list, 3912 OID_AUTO, "negotiated_fec", CTLTYPE_STRING | CTLFLAG_RD, 3913 sc, 0, ice_sysctl_negotiated_fec, "A", "Current Negotiated FEC mode"); 3914 3915 SYSCTL_ADD_PROC(ctx, ctx_list, 3916 OID_AUTO, "fc", CTLTYPE_STRING | CTLFLAG_RW, 3917 sc, 0, ice_sysctl_fc_config, "A", ICE_SYSCTL_HELP_FC_CONFIG); 3918 3919 SYSCTL_ADD_PROC(ctx, ctx_list, 3920 OID_AUTO, "advertise_speed", CTLTYPE_U16 | CTLFLAG_RW, 3921 sc, 0, ice_sysctl_advertise_speed, "SU", ICE_SYSCTL_HELP_ADVERTISE_SPEED); 3922 3923 SYSCTL_ADD_PROC(ctx, ctx_list, 3924 OID_AUTO, "fw_lldp_agent", CTLTYPE_U8 | CTLFLAG_RWTUN, 3925 sc, 0, ice_sysctl_fw_lldp_agent, "CU", ICE_SYSCTL_HELP_FW_LLDP_AGENT); 3926 3927 /* Differentiate software and hardware statistics, by keeping hw stats 3928 * in their own node. This isn't in ice_add_device_tunables, because 3929 * we won't have any CTLFLAG_TUN sysctls under this node. 3930 */ 3931 hw_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "hw", CTLFLAG_RD, 3932 NULL, "Port Hardware Statistics"); 3933 3934 ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur); 3935 3936 /* Add the main PF VSI stats now. Other VSIs will add their own stats 3937 * during creation 3938 */ 3939 ice_add_vsi_sysctls(&sc->pf_vsi); 3940 3941 /* Add sysctls related to debugging the device driver. This includes 3942 * sysctls which display additional internal driver state for use in 3943 * understanding what is happening within the driver. 3944 */ 3945 ice_add_debug_sysctls(sc); 3946 } 3947 3948 /** 3949 * @enum hmc_error_type 3950 * @brief enumeration of HMC errors 3951 * 3952 * Enumeration defining the possible HMC errors that might occur. 3953 */ 3954 enum hmc_error_type { 3955 HMC_ERR_PMF_INVALID = 0, 3956 HMC_ERR_VF_IDX_INVALID = 1, 3957 HMC_ERR_VF_PARENT_PF_INVALID = 2, 3958 /* 3 is reserved */ 3959 HMC_ERR_INDEX_TOO_BIG = 4, 3960 HMC_ERR_ADDRESS_TOO_LARGE = 5, 3961 HMC_ERR_SEGMENT_DESC_INVALID = 6, 3962 HMC_ERR_SEGMENT_DESC_TOO_SMALL = 7, 3963 HMC_ERR_PAGE_DESC_INVALID = 8, 3964 HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION = 9, 3965 /* 10 is reserved */ 3966 HMC_ERR_INVALID_OBJECT_TYPE = 11, 3967 /* 12 is reserved */ 3968 }; 3969 3970 /** 3971 * ice_log_hmc_error - Log an HMC error message 3972 * @hw: device hw structure 3973 * @dev: the device to pass to device_printf() 3974 * 3975 * Log a message when an HMC error interrupt is triggered. 
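 *
 * The error details are decoded from the PFHMC_ERRORINFO and PFHMC_ERRORDATA
 * registers, and PFHMC_ERRORINFO is cleared afterwards so that a subsequent
 * error can be captured.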
3976 */ 3977 void 3978 ice_log_hmc_error(struct ice_hw *hw, device_t dev) 3979 { 3980 u32 info, data; 3981 u8 index, errtype, objtype; 3982 bool isvf; 3983 3984 info = rd32(hw, PFHMC_ERRORINFO); 3985 data = rd32(hw, PFHMC_ERRORDATA); 3986 3987 index = (u8)(info & PFHMC_ERRORINFO_PMF_INDEX_M); 3988 errtype = (u8)((info & PFHMC_ERRORINFO_HMC_ERROR_TYPE_M) >> 3989 PFHMC_ERRORINFO_HMC_ERROR_TYPE_S); 3990 objtype = (u8)((info & PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M) >> 3991 PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S); 3992 3993 isvf = info & PFHMC_ERRORINFO_PMF_ISVF_M; 3994 3995 device_printf(dev, "%s HMC Error detected on PMF index %d:\n", 3996 isvf ? "VF" : "PF", index); 3997 3998 device_printf(dev, "error type %d, object type %d, data 0x%08x\n", 3999 errtype, objtype, data); 4000 4001 switch (errtype) { 4002 case HMC_ERR_PMF_INVALID: 4003 device_printf(dev, "Private Memory Function is not valid\n"); 4004 break; 4005 case HMC_ERR_VF_IDX_INVALID: 4006 device_printf(dev, "Invalid Private Memory Function index for PE enabled VF\n"); 4007 break; 4008 case HMC_ERR_VF_PARENT_PF_INVALID: 4009 device_printf(dev, "Invalid parent PF for PE enabled VF\n"); 4010 break; 4011 case HMC_ERR_INDEX_TOO_BIG: 4012 device_printf(dev, "Object index too big\n"); 4013 break; 4014 case HMC_ERR_ADDRESS_TOO_LARGE: 4015 device_printf(dev, "Address extends beyond segment descriptor limit\n"); 4016 break; 4017 case HMC_ERR_SEGMENT_DESC_INVALID: 4018 device_printf(dev, "Segment descriptor is invalid\n"); 4019 break; 4020 case HMC_ERR_SEGMENT_DESC_TOO_SMALL: 4021 device_printf(dev, "Segment descriptor is too small\n"); 4022 break; 4023 case HMC_ERR_PAGE_DESC_INVALID: 4024 device_printf(dev, "Page descriptor is invalid\n"); 4025 break; 4026 case HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION: 4027 device_printf(dev, "Unsupported Request completion received from PCIe\n"); 4028 break; 4029 case HMC_ERR_INVALID_OBJECT_TYPE: 4030 device_printf(dev, "Invalid object type\n"); 4031 break; 4032 default: 4033 device_printf(dev, "Unknown HMC error\n"); 4034 } 4035 4036 /* Clear the error indication */ 4037 wr32(hw, PFHMC_ERRORINFO, 0); 4038 } 4039 4040 /** 4041 * @struct ice_sysctl_info 4042 * @brief sysctl information 4043 * 4044 * Structure used to simplify the process of defining the many similar 4045 * statistics sysctls. 4046 */ 4047 struct ice_sysctl_info { 4048 u64 *stat; 4049 const char *name; 4050 const char *description; 4051 }; 4052 4053 /** 4054 * ice_add_sysctls_eth_stats - Add sysctls for ethernet statistics 4055 * @ctx: sysctl ctx to use 4056 * @parent: the parent node to add sysctls under 4057 * @stats: the ethernet stats structure to source values from 4058 * 4059 * Adds statistics sysctls for the ethernet statistics of the MAC or a VSI. 4060 * Will add them under the parent node specified. 4061 * 4062 * Note that tx_errors is only meaningful for VSIs and not the global MAC/PF 4063 * statistics, so it is not included here. Similarly, rx_discards has different 4064 * descriptions for VSIs and MAC/PF stats, so it is also not included here. 
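 *
 * The statistics are described by a local table of ice_sysctl_info entries
 * terminated by a zeroed sentinel; each entry becomes a read-only 64-bit
 * sysctl under the given parent node.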
4065 */ 4066 void 4067 ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, 4068 struct sysctl_oid *parent, 4069 struct ice_eth_stats *stats) 4070 { 4071 const struct ice_sysctl_info ctls[] = { 4072 /* Rx Stats */ 4073 { &stats->rx_bytes, "good_octets_rcvd", "Good Octets Received" }, 4074 { &stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received" }, 4075 { &stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received" }, 4076 { &stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received" }, 4077 /* Tx Stats */ 4078 { &stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted" }, 4079 { &stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted" }, 4080 { &stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted" }, 4081 { &stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted" }, 4082 /* End */ 4083 { 0, 0, 0 } 4084 }; 4085 4086 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4087 4088 const struct ice_sysctl_info *entry = ctls; 4089 while (entry->stat != 0) { 4090 SYSCTL_ADD_U64(ctx, parent_list, OID_AUTO, entry->name, 4091 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 4092 entry->description); 4093 entry++; 4094 } 4095 } 4096 4097 /** 4098 * ice_sysctl_tx_cso_stat - Display Tx checksum offload statistic 4099 * @oidp: sysctl oid structure 4100 * @arg1: pointer to private data structure 4101 * @arg2: Tx CSO stat to read 4102 * @req: sysctl request pointer 4103 * 4104 * On read: Sums the per-queue Tx CSO stat and displays it. 4105 */ 4106 static int 4107 ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS) 4108 { 4109 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4110 enum ice_tx_cso_stat type = (enum ice_tx_cso_stat)arg2; 4111 u64 stat = 0; 4112 int i; 4113 4114 if (ice_driver_is_detaching(vsi->sc)) 4115 return (ESHUTDOWN); 4116 4117 /* Check that the type is valid */ 4118 if (type >= ICE_CSO_STAT_TX_COUNT) 4119 return (EDOOFUS); 4120 4121 /* Sum the stat for each of the Tx queues */ 4122 for (i = 0; i < vsi->num_tx_queues; i++) 4123 stat += vsi->tx_queues[i].stats.cso[type]; 4124 4125 return sysctl_handle_64(oidp, NULL, stat, req); 4126 } 4127 4128 /** 4129 * ice_sysctl_rx_cso_stat - Display Rx checksum offload statistic 4130 * @oidp: sysctl oid structure 4131 * @arg1: pointer to private data structure 4132 * @arg2: Rx CSO stat to read 4133 * @req: sysctl request pointer 4134 * 4135 * On read: Sums the per-queue Rx CSO stat and displays it. 4136 */ 4137 static int 4138 ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS) 4139 { 4140 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4141 enum ice_rx_cso_stat type = (enum ice_rx_cso_stat)arg2; 4142 u64 stat = 0; 4143 int i; 4144 4145 if (ice_driver_is_detaching(vsi->sc)) 4146 return (ESHUTDOWN); 4147 4148 /* Check that the type is valid */ 4149 if (type >= ICE_CSO_STAT_RX_COUNT) 4150 return (EDOOFUS); 4151 4152 /* Sum the stat for each of the Rx queues */ 4153 for (i = 0; i < vsi->num_rx_queues; i++) 4154 stat += vsi->rx_queues[i].stats.cso[type]; 4155 4156 return sysctl_handle_64(oidp, NULL, stat, req); 4157 } 4158 4159 /** 4160 * @struct ice_rx_cso_stat_info 4161 * @brief sysctl information for an Rx checksum offload statistic 4162 * 4163 * Structure used to simplify the process of defining the checksum offload 4164 * statistics. 
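 *
 * Arrays of these entries are terminated by an entry with NULL name and
 * description pointers, which is how the setup loops detect the end of the
 * table.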
4165 */ 4166 struct ice_rx_cso_stat_info { 4167 enum ice_rx_cso_stat type; 4168 const char *name; 4169 const char *description; 4170 }; 4171 4172 /** 4173 * @struct ice_tx_cso_stat_info 4174 * @brief sysctl information for a Tx checksum offload statistic 4175 * 4176 * Structure used to simplify the process of defining the checksum offload 4177 * statistics. 4178 */ 4179 struct ice_tx_cso_stat_info { 4180 enum ice_tx_cso_stat type; 4181 const char *name; 4182 const char *description; 4183 }; 4184 4185 /** 4186 * ice_add_sysctls_sw_stats - Add sysctls for software statistics 4187 * @vsi: pointer to the VSI to add sysctls for 4188 * @ctx: sysctl ctx to use 4189 * @parent: the parent node to add sysctls under 4190 * 4191 * Add statistics sysctls for software tracked statistics of a VSI. 4192 * 4193 * Currently this only adds checksum offload statistics, but more counters may 4194 * be added in the future. 4195 */ 4196 static void 4197 ice_add_sysctls_sw_stats(struct ice_vsi *vsi, 4198 struct sysctl_ctx_list *ctx, 4199 struct sysctl_oid *parent) 4200 { 4201 struct sysctl_oid *cso_node; 4202 struct sysctl_oid_list *cso_list; 4203 4204 /* Tx CSO Stats */ 4205 const struct ice_tx_cso_stat_info tx_ctls[] = { 4206 { ICE_CSO_STAT_TX_TCP, "tx_tcp", "Transmit TCP Packets marked for HW checksum" }, 4207 { ICE_CSO_STAT_TX_UDP, "tx_udp", "Transmit UDP Packets marked for HW checksum" }, 4208 { ICE_CSO_STAT_TX_SCTP, "tx_sctp", "Transmit SCTP Packets marked for HW checksum" }, 4209 { ICE_CSO_STAT_TX_IP4, "tx_ip4", "Transmit IPv4 Packets marked for HW checksum" }, 4210 { ICE_CSO_STAT_TX_IP6, "tx_ip6", "Transmit IPv6 Packets marked for HW checksum" }, 4211 { ICE_CSO_STAT_TX_L3_ERR, "tx_l3_err", "Transmit packets that driver failed to set L3 HW CSO bits for" }, 4212 { ICE_CSO_STAT_TX_L4_ERR, "tx_l4_err", "Transmit packets that driver failed to set L4 HW CSO bits for" }, 4213 /* End */ 4214 { ICE_CSO_STAT_TX_COUNT, 0, 0 } 4215 }; 4216 4217 /* Rx CSO Stats */ 4218 const struct ice_rx_cso_stat_info rx_ctls[] = { 4219 { ICE_CSO_STAT_RX_IP4_ERR, "rx_ip4_err", "Received packets with invalid IPv4 checksum indicated by HW" }, 4220 { ICE_CSO_STAT_RX_IP6_ERR, "rx_ip6_err", "Received IPv6 packets with extension headers" }, 4221 { ICE_CSO_STAT_RX_L3_ERR, "rx_l3_err", "Received packets with an unexpected invalid L3 checksum indicated by HW" }, 4222 { ICE_CSO_STAT_RX_TCP_ERR, "rx_tcp_err", "Received packets with invalid TCP checksum indicated by HW" }, 4223 { ICE_CSO_STAT_RX_UDP_ERR, "rx_udp_err", "Received packets with invalid UDP checksum indicated by HW" }, 4224 { ICE_CSO_STAT_RX_SCTP_ERR, "rx_sctp_err", "Received packets with invalid SCTP checksum indicated by HW" }, 4225 { ICE_CSO_STAT_RX_L4_ERR, "rx_l4_err", "Received packets with an unexpected invalid L4 checksum indicated by HW" }, 4226 /* End */ 4227 { ICE_CSO_STAT_RX_COUNT, 0, 0 } 4228 }; 4229 4230 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4231 4232 /* Add a node for statistics tracked by software. 
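Currently only the checksum offload ("cso") counters are tracked here.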
*/ 4233 cso_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "cso", CTLFLAG_RD, 4234 NULL, "Checksum offload Statistics"); 4235 cso_list = SYSCTL_CHILDREN(cso_node); 4236 4237 const struct ice_tx_cso_stat_info *tx_entry = tx_ctls; 4238 while (tx_entry->name && tx_entry->description) { 4239 SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, tx_entry->name, 4240 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, 4241 vsi, tx_entry->type, ice_sysctl_tx_cso_stat, "QU", 4242 tx_entry->description); 4243 tx_entry++; 4244 } 4245 4246 const struct ice_rx_cso_stat_info *rx_entry = rx_ctls; 4247 while (rx_entry->name && rx_entry->description) { 4248 SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, rx_entry->name, 4249 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, 4250 vsi, rx_entry->type, ice_sysctl_rx_cso_stat, "QU", 4251 rx_entry->description); 4252 rx_entry++; 4253 } 4254 } 4255 4256 /** 4257 * ice_add_vsi_sysctls - Add sysctls for a VSI 4258 * @vsi: pointer to VSI structure 4259 * 4260 * Add various sysctls for a given VSI. 4261 */ 4262 void 4263 ice_add_vsi_sysctls(struct ice_vsi *vsi) 4264 { 4265 struct sysctl_ctx_list *ctx = &vsi->ctx; 4266 struct sysctl_oid *hw_node, *sw_node; 4267 struct sysctl_oid_list *vsi_list, *hw_list, *sw_list; 4268 4269 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 4270 4271 /* Keep hw stats in their own node. */ 4272 hw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "hw", CTLFLAG_RD, 4273 NULL, "VSI Hardware Statistics"); 4274 hw_list = SYSCTL_CHILDREN(hw_node); 4275 4276 /* Add the ethernet statistics for this VSI */ 4277 ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur); 4278 4279 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_discards", 4280 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards, 4281 0, "Discarded Rx Packets (see rx_errors or rx_no_desc)"); 4282 4283 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_errors", 4284 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_errors, 4285 0, "Rx Packets Discarded Due To Error"); 4286 4287 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_no_desc", 4288 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_no_desc, 4289 0, "Rx Packets Discarded Due To Lack Of Descriptors"); 4290 4291 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "tx_errors", 4292 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.tx_errors, 4293 0, "Tx Packets Discarded Due To Error"); 4294 4295 /* Add a node for statistics tracked by software. */ 4296 sw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "sw", CTLFLAG_RD, 4297 NULL, "VSI Software Statistics"); 4298 sw_list = SYSCTL_CHILDREN(sw_node); 4299 4300 ice_add_sysctls_sw_stats(vsi, ctx, sw_node); 4301 } 4302 4303 /** 4304 * ice_add_sysctls_mac_stats - Add sysctls for global MAC statistics 4305 * @ctx: the sysctl ctx to use 4306 * @parent: parent node to add the sysctls under 4307 * @stats: the hw ports stat structure to pull values from 4308 * 4309 * Add global MAC statistics sysctls. 
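 *
 * The counters are placed under a "mac" child node of the given parent, and
 * the common Ethernet statistics are added alongside them via
 * ice_add_sysctls_eth_stats().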
4310 */ 4311 void 4312 ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, 4313 struct sysctl_oid *parent, 4314 struct ice_hw_port_stats *stats) 4315 { 4316 struct sysctl_oid *mac_node; 4317 struct sysctl_oid_list *parent_list, *mac_list; 4318 4319 parent_list = SYSCTL_CHILDREN(parent); 4320 4321 mac_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "mac", CTLFLAG_RD, 4322 NULL, "Mac Hardware Statistics"); 4323 mac_list = SYSCTL_CHILDREN(mac_node); 4324 4325 /* add the common ethernet statistics */ 4326 ice_add_sysctls_eth_stats(ctx, mac_node, &stats->eth); 4327 4328 const struct ice_sysctl_info ctls[] = { 4329 /* Packet Reception Stats */ 4330 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, 4331 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, 4332 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, 4333 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, 4334 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, 4335 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, 4336 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, 4337 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, 4338 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, 4339 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, 4340 {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, 4341 {&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"}, 4342 {&stats->eth.rx_discards, "rx_discards", 4343 "Discarded Rx Packets by Port (shortage of storage space)"}, 4344 /* Packet Transmission Stats */ 4345 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, 4346 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, 4347 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, 4348 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, 4349 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, 4350 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, 4351 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, 4352 {&stats->tx_dropped_link_down, "tx_dropped", "Tx Dropped Due To Link Down"}, 4353 /* Flow control */ 4354 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, 4355 {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, 4356 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, 4357 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, 4358 /* Other */ 4359 {&stats->crc_errors, "crc_errors", "CRC Errors"}, 4360 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, 4361 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, 4362 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, 4363 /* End */ 4364 { 0, 0, 0 } 4365 }; 4366 4367 const struct ice_sysctl_info *entry = ctls; 4368 while (entry->stat != 0) { 4369 SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, entry->name, 4370 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 4371 entry->description); 4372 entry++; 4373 } 4374 } 4375 4376 /** 4377 * ice_configure_misc_interrupts - enable 'other' interrupt causes 4378 * @sc: pointer to device private softc 4379 * 4380 * Enable various "other" interrupt causes, and associate them to interrupt 0, 4381 * which is our administrative interrupt. 
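 *
 * The enabled causes include ECC errors, malicious driver detection events,
 * global resets, PCI exceptions, VF function-level resets (VFLR), HMC
 * errors, and protocol engine critical errors. The OICR, Mailbox, and
 * AdminQ interrupts are all associated with MSI-X vector 0 / ITR 0.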
4382 */ 4383 void 4384 ice_configure_misc_interrupts(struct ice_softc *sc) 4385 { 4386 struct ice_hw *hw = &sc->hw; 4387 u32 val; 4388 4389 /* Read the OICR register to clear it */ 4390 rd32(hw, PFINT_OICR); 4391 4392 /* Enable useful "other" interrupt causes */ 4393 val = (PFINT_OICR_ECC_ERR_M | 4394 PFINT_OICR_MAL_DETECT_M | 4395 PFINT_OICR_GRST_M | 4396 PFINT_OICR_PCI_EXCEPTION_M | 4397 PFINT_OICR_VFLR_M | 4398 PFINT_OICR_HMC_ERR_M | 4399 PFINT_OICR_PE_CRITERR_M); 4400 4401 wr32(hw, PFINT_OICR_ENA, val); 4402 4403 /* Note that since we're using MSI-X index 0, and ITR index 0, we do 4404 * not explicitly program them when writing to the PFINT_*_CTL 4405 * registers. Nevertheless, these writes are associating the 4406 * interrupts with the ITR 0 vector 4407 */ 4408 4409 /* Associate the OICR interrupt with ITR 0, and enable it */ 4410 wr32(hw, PFINT_OICR_CTL, PFINT_OICR_CTL_CAUSE_ENA_M); 4411 4412 /* Associate the Mailbox interrupt with ITR 0, and enable it */ 4413 wr32(hw, PFINT_MBX_CTL, PFINT_MBX_CTL_CAUSE_ENA_M); 4414 4415 /* Associate the AdminQ interrupt with ITR 0, and enable it */ 4416 wr32(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M); 4417 } 4418 4419 /** 4420 * ice_filter_is_mcast - Check if info is a multicast filter 4421 * @vsi: vsi structure addresses are targeted towards 4422 * @info: filter info 4423 * 4424 * @returns true if the provided info is a multicast filter, and false 4425 * otherwise. 4426 */ 4427 static bool 4428 ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info) 4429 { 4430 const u8 *addr = info->l_data.mac.mac_addr; 4431 4432 /* 4433 * Check if this info matches a multicast filter added by 4434 * ice_add_mac_to_list 4435 */ 4436 if ((info->flag == ICE_FLTR_TX) && 4437 (info->src_id == ICE_SRC_ID_VSI) && 4438 (info->lkup_type == ICE_SW_LKUP_MAC) && 4439 (info->vsi_handle == vsi->idx) && 4440 ETHER_IS_MULTICAST(addr) && !ETHER_IS_BROADCAST(addr)) 4441 return true; 4442 4443 return false; 4444 } 4445 4446 /** 4447 * @struct ice_mcast_sync_data 4448 * @brief data used by ice_sync_one_mcast_filter function 4449 * 4450 * Structure used to store data needed for processing by the 4451 * ice_sync_one_mcast_filter. This structure contains a linked list of filters 4452 * to be added, an error indication, and a pointer to the device softc. 4453 */ 4454 struct ice_mcast_sync_data { 4455 struct ice_list_head add_list; 4456 struct ice_softc *sc; 4457 int err; 4458 }; 4459 4460 /** 4461 * ice_sync_one_mcast_filter - Check if we need to program the filter 4462 * @p: void pointer to algorithm data 4463 * @sdl: link level socket address 4464 * @count: unused count value 4465 * 4466 * Called by if_foreach_llmaddr to operate on each filter in the ifp filter 4467 * list. For the given address, search our internal list to see if we have 4468 * found the filter. If not, add it to our list of filters that need to be 4469 * programmed. 
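 *
 * If building the add list fails, the error is recorded in the shared
 * ice_mcast_sync_data structure so that the remaining addresses are skipped
 * and ice_sync_multicast_filters() can propagate the error.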
4470 * 4471 * @returns (1) if we've actually setup the filter to be added 4472 */ 4473 static u_int 4474 ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, 4475 u_int __unused count) 4476 { 4477 struct ice_mcast_sync_data *data = (struct ice_mcast_sync_data *)p; 4478 struct ice_softc *sc = data->sc; 4479 struct ice_hw *hw = &sc->hw; 4480 struct ice_switch_info *sw = hw->switch_info; 4481 const u8 *sdl_addr = (const u8 *)LLADDR(sdl); 4482 struct ice_fltr_mgmt_list_entry *itr; 4483 struct ice_list_head *rules; 4484 int err; 4485 4486 rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4487 4488 /* 4489 * If a previous filter already indicated an error, there is no need 4490 * for us to finish processing the rest of the filters. 4491 */ 4492 if (data->err) 4493 return (0); 4494 4495 /* See if this filter has already been programmed */ 4496 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { 4497 struct ice_fltr_info *info = &itr->fltr_info; 4498 const u8 *addr = info->l_data.mac.mac_addr; 4499 4500 /* Only check multicast filters */ 4501 if (!ice_filter_is_mcast(&sc->pf_vsi, info)) 4502 continue; 4503 4504 /* 4505 * If this filter matches, mark the internal filter as 4506 * "found", and exit. 4507 */ 4508 if (bcmp(addr, sdl_addr, ETHER_ADDR_LEN) == 0) { 4509 itr->marker = ICE_FLTR_FOUND; 4510 return (1); 4511 } 4512 } 4513 4514 /* 4515 * If we failed to locate the filter in our internal list, we need to 4516 * place it into our add list. 4517 */ 4518 err = ice_add_mac_to_list(&sc->pf_vsi, &data->add_list, sdl_addr, 4519 ICE_FWD_TO_VSI); 4520 if (err) { 4521 device_printf(sc->dev, 4522 "Failed to place MAC %6D onto add list, err %s\n", 4523 sdl_addr, ":", ice_err_str(err)); 4524 data->err = err; 4525 4526 return (0); 4527 } 4528 4529 return (1); 4530 } 4531 4532 /** 4533 * ice_sync_multicast_filters - Synchronize OS and internal filter list 4534 * @sc: device private structure 4535 * 4536 * Called in response to SIOCDELMULTI to synchronize the operating system 4537 * multicast address list with the internal list of filters programmed to 4538 * firmware. 4539 * 4540 * Works in one phase to find added and deleted filters using a marker bit on 4541 * the internal list. 4542 * 4543 * First, a loop over the internal list clears the marker bit. Second, for 4544 * each filter in the ifp list is checked. If we find it in the internal list, 4545 * the marker bit is set. Otherwise, the filter is added to the add list. 4546 * Third, a loop over the internal list determines if any filters have not 4547 * been found. Each of these is added to the delete list. Finally, the add and 4548 * delete lists are programmed to firmware to update the filters. 4549 * 4550 * @returns zero on success or an integer error code on failure. 
4551 */ 4552 int 4553 ice_sync_multicast_filters(struct ice_softc *sc) 4554 { 4555 struct ice_hw *hw = &sc->hw; 4556 struct ice_switch_info *sw = hw->switch_info; 4557 struct ice_fltr_mgmt_list_entry *itr; 4558 struct ice_mcast_sync_data data = {}; 4559 struct ice_list_head *rules, remove_list; 4560 enum ice_status status; 4561 int err = 0; 4562 4563 INIT_LIST_HEAD(&data.add_list); 4564 INIT_LIST_HEAD(&remove_list); 4565 data.sc = sc; 4566 data.err = 0; 4567 4568 rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4569 4570 /* Acquire the lock for the entire duration */ 4571 ice_acquire_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4572 4573 /* (1) Reset the marker state for all filters */ 4574 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) 4575 itr->marker = ICE_FLTR_NOT_FOUND; 4576 4577 /* (2) determine which filters need to be added and removed */ 4578 if_foreach_llmaddr(sc->ifp, ice_sync_one_mcast_filter, (void *)&data); 4579 if (data.err) { 4580 /* ice_sync_one_mcast_filter already prints an error */ 4581 err = data.err; 4582 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4583 goto free_filter_lists; 4584 } 4585 4586 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { 4587 struct ice_fltr_info *info = &itr->fltr_info; 4588 const u8 *addr = info->l_data.mac.mac_addr; 4589 4590 /* Only check multicast filters */ 4591 if (!ice_filter_is_mcast(&sc->pf_vsi, info)) 4592 continue; 4593 4594 /* 4595 * If the filter is not marked as found, then it must no 4596 * longer be in the ifp address list, so we need to remove it. 4597 */ 4598 if (itr->marker == ICE_FLTR_NOT_FOUND) { 4599 err = ice_add_mac_to_list(&sc->pf_vsi, &remove_list, 4600 addr, ICE_FWD_TO_VSI); 4601 if (err) { 4602 device_printf(sc->dev, 4603 "Failed to place MAC %6D onto remove list, err %s\n", 4604 addr, ":", ice_err_str(err)); 4605 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4606 goto free_filter_lists; 4607 } 4608 } 4609 } 4610 4611 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4612 4613 status = ice_add_mac(hw, &data.add_list); 4614 if (status) { 4615 device_printf(sc->dev, 4616 "Could not add new MAC filters, err %s aq_err %s\n", 4617 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 4618 err = (EIO); 4619 goto free_filter_lists; 4620 } 4621 4622 status = ice_remove_mac(hw, &remove_list); 4623 if (status) { 4624 device_printf(sc->dev, 4625 "Could not remove old MAC filters, err %s aq_err %s\n", 4626 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 4627 err = (EIO); 4628 goto free_filter_lists; 4629 } 4630 4631 free_filter_lists: 4632 ice_free_fltr_list(&data.add_list); 4633 ice_free_fltr_list(&remove_list); 4634 4635 return (err); 4636 } 4637 4638 /** 4639 * ice_add_vlan_hw_filter - Add a VLAN filter for a given VSI 4640 * @vsi: The VSI to add the filter for 4641 * @vid: VLAN to add 4642 * 4643 * Programs a HW filter so that the given VSI will receive the specified VLAN. 
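 *
 * The filter is built as a single-entry list on the stack with an
 * ICE_FWD_TO_VSI action and handed to ice_add_vlan(), so no filter list
 * memory needs to be allocated or freed here.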
4644 */ 4645 enum ice_status 4646 ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) 4647 { 4648 struct ice_hw *hw = &vsi->sc->hw; 4649 struct ice_list_head vlan_list; 4650 struct ice_fltr_list_entry vlan_entry; 4651 4652 INIT_LIST_HEAD(&vlan_list); 4653 memset(&vlan_entry, 0, sizeof(vlan_entry)); 4654 4655 vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 4656 vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 4657 vlan_entry.fltr_info.flag = ICE_FLTR_TX; 4658 vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; 4659 vlan_entry.fltr_info.vsi_handle = vsi->idx; 4660 vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; 4661 4662 LIST_ADD(&vlan_entry.list_entry, &vlan_list); 4663 4664 return ice_add_vlan(hw, &vlan_list); 4665 } 4666 4667 /** 4668 * ice_remove_vlan_hw_filter - Remove a VLAN filter for a given VSI 4669 * @vsi: The VSI to add the filter for 4670 * @vid: VLAN to remove 4671 * 4672 * Removes a previously programmed HW filter for the specified VSI. 4673 */ 4674 enum ice_status 4675 ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) 4676 { 4677 struct ice_hw *hw = &vsi->sc->hw; 4678 struct ice_list_head vlan_list; 4679 struct ice_fltr_list_entry vlan_entry; 4680 4681 INIT_LIST_HEAD(&vlan_list); 4682 memset(&vlan_entry, 0, sizeof(vlan_entry)); 4683 4684 vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 4685 vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 4686 vlan_entry.fltr_info.flag = ICE_FLTR_TX; 4687 vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; 4688 vlan_entry.fltr_info.vsi_handle = vsi->idx; 4689 vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; 4690 4691 LIST_ADD(&vlan_entry.list_entry, &vlan_list); 4692 4693 return ice_remove_vlan(hw, &vlan_list); 4694 } 4695 4696 #define ICE_SYSCTL_HELP_RX_ITR \ 4697 "\nControl Rx interrupt throttle rate." \ 4698 "\n\t0-8160 - sets interrupt rate in usecs" \ 4699 "\n\t -1 - reset the Rx itr to default" 4700 4701 /** 4702 * ice_sysctl_rx_itr - Display or change the Rx ITR for a VSI 4703 * @oidp: sysctl oid structure 4704 * @arg1: pointer to private data structure 4705 * @arg2: unused 4706 * @req: sysctl request pointer 4707 * 4708 * On read: Displays the current Rx ITR value 4709 * on write: Sets the Rx ITR value, reconfiguring device if it is up 4710 */ 4711 static int 4712 ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS) 4713 { 4714 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4715 struct ice_softc *sc = vsi->sc; 4716 int increment, error = 0; 4717 4718 UNREFERENCED_PARAMETER(arg2); 4719 4720 if (ice_driver_is_detaching(sc)) 4721 return (ESHUTDOWN); 4722 4723 error = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req); 4724 if ((error) || (req->newptr == NULL)) 4725 return (error); 4726 4727 if (vsi->rx_itr < 0) 4728 vsi->rx_itr = ICE_DFLT_RX_ITR; 4729 if (vsi->rx_itr > ICE_ITR_MAX) 4730 vsi->rx_itr = ICE_ITR_MAX; 4731 4732 /* Assume 2usec increment if it hasn't been loaded yet */ 4733 increment = sc->hw.itr_gran ? : 2; 4734 4735 /* We need to round the value to the hardware's ITR granularity */ 4736 vsi->rx_itr = (vsi->rx_itr / increment ) * increment; 4737 4738 /* If the driver has finished initializing, then we need to reprogram 4739 * the ITR registers now. Otherwise, they will be programmed during 4740 * driver initialization. 4741 */ 4742 if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) 4743 ice_configure_rx_itr(vsi); 4744 4745 return (0); 4746 } 4747 4748 #define ICE_SYSCTL_HELP_TX_ITR \ 4749 "\nControl Tx interrupt throttle rate." 
\ 4750 "\n\t0-8160 - sets interrupt rate in usecs" \ 4751 "\n\t -1 - reset the Tx itr to default" 4752 4753 /** 4754 * ice_sysctl_tx_itr - Display or change the Tx ITR for a VSI 4755 * @oidp: sysctl oid structure 4756 * @arg1: pointer to private data structure 4757 * @arg2: unused 4758 * @req: sysctl request pointer 4759 * 4760 * On read: Displays the current Tx ITR value 4761 * on write: Sets the Tx ITR value, reconfiguring device if it is up 4762 */ 4763 static int 4764 ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS) 4765 { 4766 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4767 struct ice_softc *sc = vsi->sc; 4768 int increment, error = 0; 4769 4770 UNREFERENCED_PARAMETER(arg2); 4771 4772 if (ice_driver_is_detaching(sc)) 4773 return (ESHUTDOWN); 4774 4775 error = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req); 4776 if ((error) || (req->newptr == NULL)) 4777 return (error); 4778 4779 /* Allow configuring a negative value to reset to the default */ 4780 if (vsi->tx_itr < 0) 4781 vsi->tx_itr = ICE_DFLT_TX_ITR; 4782 if (vsi->tx_itr > ICE_ITR_MAX) 4783 vsi->tx_itr = ICE_ITR_MAX; 4784 4785 /* Assume 2usec increment if it hasn't been loaded yet */ 4786 increment = sc->hw.itr_gran ? : 2; 4787 4788 /* We need to round the value to the hardware's ITR granularity */ 4789 vsi->tx_itr = (vsi->tx_itr / increment ) * increment; 4790 4791 /* If the driver has finished initializing, then we need to reprogram 4792 * the ITR registers now. Otherwise, they will be programmed during 4793 * driver initialization. 4794 */ 4795 if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) 4796 ice_configure_tx_itr(vsi); 4797 4798 return (0); 4799 } 4800 4801 /** 4802 * ice_add_vsi_tunables - Add tunables and nodes for a VSI 4803 * @vsi: pointer to VSI structure 4804 * @parent: parent node to add the tunables under 4805 * 4806 * Create a sysctl context for the VSI, so that sysctls for the VSI can be 4807 * dynamically removed upon VSI removal. 4808 * 4809 * Add various tunables and set up the basic node structure for the VSI. Must 4810 * be called *prior* to ice_add_vsi_sysctls. It should be called as soon as 4811 * possible after the VSI memory is initialized. 4812 * 4813 * VSI specific sysctls with CTLFLAG_TUN should be initialized here so that 4814 * their values can be read from loader.conf prior to their first use in the 4815 * driver. 
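 *
 * For example, the ITR tunables for the main PF VSI can typically be set
 * from loader.conf with entries similar to the following (a sketch assuming
 * unit 0 and VSI index 0; the exact node path depends on where the VSI node
 * was attached):
 *
 *	dev.ice.0.vsi.0.rx_itr="62"
 *	dev.ice.0.vsi.0.tx_itr="122"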
4816 */ 4817 void 4818 ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent) 4819 { 4820 struct sysctl_oid_list *vsi_list; 4821 char vsi_name[32], vsi_desc[32]; 4822 4823 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4824 4825 /* Initialize the sysctl context for this VSI */ 4826 sysctl_ctx_init(&vsi->ctx); 4827 4828 /* Add a node to collect this VSI's statistics together */ 4829 snprintf(vsi_name, sizeof(vsi_name), "%u", vsi->idx); 4830 snprintf(vsi_desc, sizeof(vsi_desc), "VSI %u", vsi->idx); 4831 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->ctx, parent_list, OID_AUTO, vsi_name, 4832 CTLFLAG_RD, NULL, vsi_desc); 4833 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 4834 4835 vsi->rx_itr = ICE_DFLT_TX_ITR; 4836 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "rx_itr", 4837 CTLTYPE_S16 | CTLFLAG_RWTUN, 4838 vsi, 0, ice_sysctl_rx_itr, "S", 4839 ICE_SYSCTL_HELP_RX_ITR); 4840 4841 vsi->tx_itr = ICE_DFLT_TX_ITR; 4842 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "tx_itr", 4843 CTLTYPE_S16 | CTLFLAG_RWTUN, 4844 vsi, 0, ice_sysctl_tx_itr, "S", 4845 ICE_SYSCTL_HELP_TX_ITR); 4846 } 4847 4848 /** 4849 * ice_del_vsi_sysctl_ctx - Delete the sysctl context(s) of a VSI 4850 * @vsi: the VSI to remove contexts for 4851 * 4852 * Free the context for the VSI sysctls. This includes the main context, as 4853 * well as the per-queue sysctls. 4854 */ 4855 void 4856 ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi) 4857 { 4858 device_t dev = vsi->sc->dev; 4859 int err; 4860 4861 if (vsi->vsi_node) { 4862 err = sysctl_ctx_free(&vsi->ctx); 4863 if (err) 4864 device_printf(dev, "failed to free VSI %d sysctl context, err %s\n", 4865 vsi->idx, ice_err_str(err)); 4866 vsi->vsi_node = NULL; 4867 } 4868 } 4869 4870 /** 4871 * ice_add_device_tunables - Add early tunable sysctls and sysctl nodes 4872 * @sc: device private structure 4873 * 4874 * Add per-device dynamic tunable sysctls, and setup the general sysctl trees 4875 * for re-use by ice_add_device_sysctls. 4876 * 4877 * In order for the sysctl fields to be initialized before use, this function 4878 * should be called as early as possible during attach activities. 4879 * 4880 * Any non-global sysctl marked as CTLFLAG_TUN should likely be initialized 4881 * here in this function, rather than later in ice_add_device_sysctls. 4882 * 4883 * To make things easier, this function is also expected to setup the various 4884 * sysctl nodes in addition to tunables so that other sysctls which can't be 4885 * initialized early can hook into the same nodes. 4886 */ 4887 void 4888 ice_add_device_tunables(struct ice_softc *sc) 4889 { 4890 device_t dev = sc->dev; 4891 4892 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 4893 struct sysctl_oid_list *ctx_list = 4894 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 4895 4896 /* Add a node to track VSI sysctls. Keep track of the node in the 4897 * softc so that we can hook other sysctls into it later. This 4898 * includes both the VSI statistics, as well as potentially dynamic 4899 * VSIs in the future. 
4900 */ 4901 4902 sc->vsi_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "vsi", 4903 CTLFLAG_RD, NULL, "VSI Configuration and Statistics"); 4904 4905 /* Add debug tunables */ 4906 ice_add_debug_tunables(sc); 4907 } 4908 4909 /** 4910 * ice_sysctl_dump_mac_filters - Dump a list of all HW MAC Filters 4911 * @oidp: sysctl oid structure 4912 * @arg1: pointer to private data structure 4913 * @arg2: unused 4914 * @req: sysctl request pointer 4915 * 4916 * Callback for "mac_filters" sysctl to dump the programmed MAC filters. 4917 */ 4918 static int 4919 ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS) 4920 { 4921 struct ice_softc *sc = (struct ice_softc *)arg1; 4922 struct ice_hw *hw = &sc->hw; 4923 struct ice_switch_info *sw = hw->switch_info; 4924 struct ice_fltr_mgmt_list_entry *fm_entry; 4925 struct ice_list_head *rule_head; 4926 struct ice_lock *rule_lock; 4927 struct ice_fltr_info *fi; 4928 struct sbuf *sbuf; 4929 int ret; 4930 4931 UNREFERENCED_PARAMETER(oidp); 4932 UNREFERENCED_PARAMETER(arg2); 4933 4934 if (ice_driver_is_detaching(sc)) 4935 return (ESHUTDOWN); 4936 4937 /* Wire the old buffer so we can take a non-sleepable lock */ 4938 ret = sysctl_wire_old_buffer(req, 0); 4939 if (ret) 4940 return (ret); 4941 4942 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4943 4944 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 4945 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4946 4947 sbuf_printf(sbuf, "MAC Filter List"); 4948 4949 ice_acquire_lock(rule_lock); 4950 4951 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 4952 fi = &fm_entry->fltr_info; 4953 4954 sbuf_printf(sbuf, 4955 "\nmac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %d", 4956 fi->l_data.mac.mac_addr, ":", fi->vsi_handle, 4957 ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, 4958 ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); 4959 4960 /* if we have a vsi_list_info, print some information about that */ 4961 if (fm_entry->vsi_list_info) { 4962 sbuf_printf(sbuf, 4963 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 4964 fm_entry->vsi_count, 4965 fm_entry->vsi_list_info->vsi_list_id, 4966 fm_entry->vsi_list_info->ref_cnt); 4967 } 4968 } 4969 4970 ice_release_lock(rule_lock); 4971 4972 sbuf_finish(sbuf); 4973 sbuf_delete(sbuf); 4974 4975 return (0); 4976 } 4977 4978 /** 4979 * ice_sysctl_dump_vlan_filters - Dump a list of all HW VLAN Filters 4980 * @oidp: sysctl oid structure 4981 * @arg1: pointer to private data structure 4982 * @arg2: unused 4983 * @req: sysctl request pointer 4984 * 4985 * Callback for "vlan_filters" sysctl to dump the programmed VLAN filters. 
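 *
 * The output is one line per filter rule, including the VLAN ID, VSI handle,
 * forwarding action and rule ID, plus VSI list details for rules shared by
 * multiple VSIs.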
4986 */ 4987 static int 4988 ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS) 4989 { 4990 struct ice_softc *sc = (struct ice_softc *)arg1; 4991 struct ice_hw *hw = &sc->hw; 4992 struct ice_switch_info *sw = hw->switch_info; 4993 struct ice_fltr_mgmt_list_entry *fm_entry; 4994 struct ice_list_head *rule_head; 4995 struct ice_lock *rule_lock; 4996 struct ice_fltr_info *fi; 4997 struct sbuf *sbuf; 4998 int ret; 4999 5000 UNREFERENCED_PARAMETER(oidp); 5001 UNREFERENCED_PARAMETER(arg2); 5002 5003 if (ice_driver_is_detaching(sc)) 5004 return (ESHUTDOWN); 5005 5006 /* Wire the old buffer so we can take a non-sleepable lock */ 5007 ret = sysctl_wire_old_buffer(req, 0); 5008 if (ret) 5009 return (ret); 5010 5011 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5012 5013 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; 5014 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; 5015 5016 sbuf_printf(sbuf, "VLAN Filter List"); 5017 5018 ice_acquire_lock(rule_lock); 5019 5020 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 5021 fi = &fm_entry->fltr_info; 5022 5023 sbuf_printf(sbuf, 5024 "\nvlan_id = %4d, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 5025 fi->l_data.vlan.vlan_id, fi->vsi_handle, 5026 ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, 5027 ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); 5028 5029 /* if we have a vsi_list_info, print some information about that */ 5030 if (fm_entry->vsi_list_info) { 5031 sbuf_printf(sbuf, 5032 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 5033 fm_entry->vsi_count, 5034 fm_entry->vsi_list_info->vsi_list_id, 5035 fm_entry->vsi_list_info->ref_cnt); 5036 } 5037 } 5038 5039 ice_release_lock(rule_lock); 5040 5041 sbuf_finish(sbuf); 5042 sbuf_delete(sbuf); 5043 5044 return (0); 5045 } 5046 5047 /** 5048 * ice_sysctl_dump_ethertype_filters - Dump a list of all HW Ethertype filters 5049 * @oidp: sysctl oid structure 5050 * @arg1: pointer to private data structure 5051 * @arg2: unused 5052 * @req: sysctl request pointer 5053 * 5054 * Callback for "ethertype_filters" sysctl to dump the programmed Ethertype 5055 * filters. 
5056 */ 5057 static int 5058 ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS) 5059 { 5060 struct ice_softc *sc = (struct ice_softc *)arg1; 5061 struct ice_hw *hw = &sc->hw; 5062 struct ice_switch_info *sw = hw->switch_info; 5063 struct ice_fltr_mgmt_list_entry *fm_entry; 5064 struct ice_list_head *rule_head; 5065 struct ice_lock *rule_lock; 5066 struct ice_fltr_info *fi; 5067 struct sbuf *sbuf; 5068 int ret; 5069 5070 UNREFERENCED_PARAMETER(oidp); 5071 UNREFERENCED_PARAMETER(arg2); 5072 5073 if (ice_driver_is_detaching(sc)) 5074 return (ESHUTDOWN); 5075 5076 /* Wire the old buffer so we can take a non-sleepable lock */ 5077 ret = sysctl_wire_old_buffer(req, 0); 5078 if (ret) 5079 return (ret); 5080 5081 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5082 5083 rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rule_lock; 5084 rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rules; 5085 5086 sbuf_printf(sbuf, "Ethertype Filter List"); 5087 5088 ice_acquire_lock(rule_lock); 5089 5090 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 5091 fi = &fm_entry->fltr_info; 5092 5093 sbuf_printf(sbuf, 5094 "\nethertype = 0x%04x, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 5095 fi->l_data.ethertype_mac.ethertype, 5096 fi->vsi_handle, ice_fltr_flag_str(fi->flag), 5097 fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), 5098 fi->fltr_rule_id); 5099 5100 /* if we have a vsi_list_info, print some information about that */ 5101 if (fm_entry->vsi_list_info) { 5102 sbuf_printf(sbuf, 5103 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 5104 fm_entry->vsi_count, 5105 fm_entry->vsi_list_info->vsi_list_id, 5106 fm_entry->vsi_list_info->ref_cnt); 5107 } 5108 } 5109 5110 ice_release_lock(rule_lock); 5111 5112 sbuf_finish(sbuf); 5113 sbuf_delete(sbuf); 5114 5115 return (0); 5116 } 5117 5118 /** 5119 * ice_sysctl_dump_ethertype_mac_filters - Dump a list of all HW Ethertype/MAC filters 5120 * @oidp: sysctl oid structure 5121 * @arg1: pointer to private data structure 5122 * @arg2: unused 5123 * @req: sysctl request pointer 5124 * 5125 * Callback for "ethertype_mac_filters" sysctl to dump the programmed 5126 * Ethertype/MAC filters. 
5127 */ 5128 static int 5129 ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS) 5130 { 5131 struct ice_softc *sc = (struct ice_softc *)arg1; 5132 struct ice_hw *hw = &sc->hw; 5133 struct ice_switch_info *sw = hw->switch_info; 5134 struct ice_fltr_mgmt_list_entry *fm_entry; 5135 struct ice_list_head *rule_head; 5136 struct ice_lock *rule_lock; 5137 struct ice_fltr_info *fi; 5138 struct sbuf *sbuf; 5139 int ret; 5140 5141 UNREFERENCED_PARAMETER(oidp); 5142 UNREFERENCED_PARAMETER(arg2); 5143 5144 if (ice_driver_is_detaching(sc)) 5145 return (ESHUTDOWN); 5146 5147 /* Wire the old buffer so we can take a non-sleepable lock */ 5148 ret = sysctl_wire_old_buffer(req, 0); 5149 if (ret) 5150 return (ret); 5151 5152 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5153 5154 rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rule_lock; 5155 rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rules; 5156 5157 sbuf_printf(sbuf, "Ethertype/MAC Filter List"); 5158 5159 ice_acquire_lock(rule_lock); 5160 5161 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 5162 fi = &fm_entry->fltr_info; 5163 5164 sbuf_printf(sbuf, 5165 "\nethertype = 0x%04x, mac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 5166 fi->l_data.ethertype_mac.ethertype, 5167 fi->l_data.ethertype_mac.mac_addr, ":", 5168 fi->vsi_handle, ice_fltr_flag_str(fi->flag), 5169 fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), 5170 fi->fltr_rule_id); 5171 5172 /* if we have a vsi_list_info, print some information about that */ 5173 if (fm_entry->vsi_list_info) { 5174 sbuf_printf(sbuf, 5175 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 5176 fm_entry->vsi_count, 5177 fm_entry->vsi_list_info->vsi_list_id, 5178 fm_entry->vsi_list_info->ref_cnt); 5179 } 5180 } 5181 5182 ice_release_lock(rule_lock); 5183 5184 sbuf_finish(sbuf); 5185 sbuf_delete(sbuf); 5186 5187 return (0); 5188 } 5189 5190 /** 5191 * ice_sysctl_dump_state_flags - Dump device driver state flags 5192 * @oidp: sysctl oid structure 5193 * @arg1: pointer to private data structure 5194 * @arg2: unused 5195 * @req: sysctl request pointer 5196 * 5197 * Callback for "state" sysctl to display currently set driver state flags. 
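 *
 * Each bit set in the softc state word is translated by ice_state_to_str();
 * bits without a known name are printed as "BIT(n)" so that no set flag is
 * silently hidden.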
5198 */ 5199 static int 5200 ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS) 5201 { 5202 struct ice_softc *sc = (struct ice_softc *)arg1; 5203 struct sbuf *sbuf; 5204 u32 copied_state; 5205 unsigned int i; 5206 bool at_least_one = false; 5207 5208 UNREFERENCED_PARAMETER(oidp); 5209 UNREFERENCED_PARAMETER(arg2); 5210 5211 if (ice_driver_is_detaching(sc)) 5212 return (ESHUTDOWN); 5213 5214 /* Make a copy of the state to ensure we display coherent values */ 5215 copied_state = atomic_load_acq_32(&sc->state); 5216 5217 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5218 5219 /* Add the string for each set state to the sbuf */ 5220 for (i = 0; i < 32; i++) { 5221 if (copied_state & BIT(i)) { 5222 const char *str = ice_state_to_str((enum ice_state)i); 5223 5224 at_least_one = true; 5225 5226 if (str) 5227 sbuf_printf(sbuf, "\n%s", str); 5228 else 5229 sbuf_printf(sbuf, "\nBIT(%u)", i); 5230 } 5231 } 5232 5233 if (!at_least_one) 5234 sbuf_printf(sbuf, "Nothing set"); 5235 5236 sbuf_finish(sbuf); 5237 sbuf_delete(sbuf); 5238 5239 return (0); 5240 } 5241 5242 /** 5243 * ice_add_debug_tunables - Add tunables helpful for debugging the device driver 5244 * @sc: device private structure 5245 * 5246 * Add sysctl tunable values related to debugging the device driver. For now, 5247 * this means a tunable to set the debug mask early during driver load. 5248 * 5249 * The debug node will be marked CTLFLAG_SKIP unless INVARIANTS is defined, so 5250 * that in normal kernel builds, these will all be hidden, but on a debug 5251 * kernel they will be more easily visible. 5252 */ 5253 static void 5254 ice_add_debug_tunables(struct ice_softc *sc) 5255 { 5256 struct sysctl_oid_list *debug_list; 5257 device_t dev = sc->dev; 5258 5259 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 5260 struct sysctl_oid_list *ctx_list = 5261 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 5262 5263 sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", 5264 ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 5265 NULL, "Debug Sysctls"); 5266 debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); 5267 5268 SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "debug_mask", 5269 CTLFLAG_RW | CTLFLAG_TUN, &sc->hw.debug_mask, 0, 5270 "Debug message enable/disable mask"); 5271 5272 /* Load the default value from the global sysctl first */ 5273 sc->enable_tx_fc_filter = ice_enable_tx_fc_filter; 5274 5275 SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_fc_filter", 5276 CTLFLAG_RDTUN, &sc->enable_tx_fc_filter, 0, 5277 "Drop Ethertype 0x8808 control frames originating from software on this PF"); 5278 5279 /* Load the default value from the global sysctl first */ 5280 sc->enable_tx_lldp_filter = ice_enable_tx_lldp_filter; 5281 5282 SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_lldp_filter", 5283 CTLFLAG_RDTUN, &sc->enable_tx_lldp_filter, 0, 5284 "Drop Ethertype 0x88cc LLDP frames originating from software on this PF"); 5285 5286 } 5287 5288 #define ICE_SYSCTL_HELP_REQUEST_RESET \ 5289 "\nRequest the driver to initiate a reset." 
\ 5290 "\n\tpfr - Initiate a PF reset" \ 5291 "\n\tcorer - Initiate a CORE reset" \ 5292 "\n\tglobr - Initiate a GLOBAL reset" 5293 5294 /** 5295 * @var rl_sysctl_ticks 5296 * @brief timestamp for latest reset request sysctl call 5297 * 5298 * Helps rate-limit the call to the sysctl which resets the device 5299 */ 5300 int rl_sysctl_ticks = 0; 5301 5302 /** 5303 * ice_sysctl_request_reset - Request that the driver initiate a reset 5304 * @oidp: sysctl oid structure 5305 * @arg1: pointer to private data structure 5306 * @arg2: unused 5307 * @req: sysctl request pointer 5308 * 5309 * Callback for "request_reset" sysctl to request that the driver initiate 5310 * a reset. Expects to be passed one of the following strings 5311 * 5312 * "pfr" - Initiate a PF reset 5313 * "corer" - Initiate a CORE reset 5314 * "globr" - Initiate a Global reset 5315 */ 5316 static int 5317 ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS) 5318 { 5319 struct ice_softc *sc = (struct ice_softc *)arg1; 5320 struct ice_hw *hw = &sc->hw; 5321 enum ice_status status; 5322 enum ice_reset_req reset_type = ICE_RESET_INVAL; 5323 const char *reset_message; 5324 int error = 0; 5325 5326 /* Buffer to store the requested reset string. Must contain enough 5327 * space to store the largest expected reset string, which currently 5328 * means 6 bytes of space. 5329 */ 5330 char reset[6] = ""; 5331 5332 UNREFERENCED_PARAMETER(arg2); 5333 5334 error = priv_check(curthread, PRIV_DRIVER); 5335 if (error) 5336 return (error); 5337 5338 if (ice_driver_is_detaching(sc)) 5339 return (ESHUTDOWN); 5340 5341 /* Read in the requested reset type. */ 5342 error = sysctl_handle_string(oidp, reset, sizeof(reset), req); 5343 if ((error) || (req->newptr == NULL)) 5344 return (error); 5345 5346 if (strcmp(reset, "pfr") == 0) { 5347 reset_message = "Requesting a PF reset"; 5348 reset_type = ICE_RESET_PFR; 5349 } else if (strcmp(reset, "corer") == 0) { 5350 reset_message = "Initiating a CORE reset"; 5351 reset_type = ICE_RESET_CORER; 5352 } else if (strcmp(reset, "globr") == 0) { 5353 reset_message = "Initiating a GLOBAL reset"; 5354 reset_type = ICE_RESET_GLOBR; 5355 } else if (strcmp(reset, "empr") == 0) { 5356 device_printf(sc->dev, "Triggering an EMP reset via software is not currently supported\n"); 5357 return (EOPNOTSUPP); 5358 } 5359 5360 if (reset_type == ICE_RESET_INVAL) { 5361 device_printf(sc->dev, "%s is not a valid reset request\n", reset); 5362 return (EINVAL); 5363 } 5364 5365 /* 5366 * Rate-limit the frequency at which this function is called. 5367 * Assuming this is called successfully once, typically, 5368 * everything should be handled within the allotted time frame. 5369 * However, in the odd setup situations, we've also put in 5370 * guards for when the reset has finished, but we're in the 5371 * process of rebuilding. And instead of queueing an intent, 5372 * simply error out and let the caller retry, if so desired. 5373 */ 5374 if (TICKS_2_MSEC(ticks - rl_sysctl_ticks) < 500) { 5375 device_printf(sc->dev, 5376 "Call frequency too high. Operation aborted.\n"); 5377 return (EBUSY); 5378 } 5379 rl_sysctl_ticks = ticks; 5380 5381 if (TICKS_2_MSEC(ticks - sc->rebuild_ticks) < 100) { 5382 device_printf(sc->dev, "Device rebuilding. Operation aborted.\n"); 5383 return (EBUSY); 5384 } 5385 5386 if (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) { 5387 device_printf(sc->dev, "Device in reset. 
Operation aborted.\n"); 5388 return (EBUSY); 5389 } 5390 5391 device_printf(sc->dev, "%s\n", reset_message); 5392 5393 /* Initiate the PF reset during the admin status task */ 5394 if (reset_type == ICE_RESET_PFR) { 5395 ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ); 5396 return (0); 5397 } 5398 5399 /* 5400 * Other types of resets including CORE and GLOBAL resets trigger an 5401 * interrupt on all PFs. Initiate the reset now. Preparation and 5402 * rebuild logic will be handled by the admin status task. 5403 */ 5404 status = ice_reset(hw, reset_type); 5405 5406 /* 5407 * Resets can take a long time and we still don't want another call 5408 * to this function before we settle down. 5409 */ 5410 rl_sysctl_ticks = ticks; 5411 5412 if (status) { 5413 device_printf(sc->dev, "failed to initiate device reset, err %s\n", 5414 ice_status_str(status)); 5415 ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); 5416 return (EFAULT); 5417 } 5418 5419 return (0); 5420 } 5421 5422 /** 5423 * ice_add_debug_sysctls - Add sysctls helpful for debugging the device driver 5424 * @sc: device private structure 5425 * 5426 * Add sysctls related to debugging the device driver. Generally these should 5427 * simply be sysctls which dump internal driver state, to aid in understanding 5428 * what the driver is doing. 5429 */ 5430 static void 5431 ice_add_debug_sysctls(struct ice_softc *sc) 5432 { 5433 struct sysctl_oid *sw_node; 5434 struct sysctl_oid_list *debug_list, *sw_list; 5435 device_t dev = sc->dev; 5436 5437 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 5438 5439 debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); 5440 5441 SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "request_reset", 5442 CTLTYPE_STRING | CTLFLAG_WR, sc, 0, 5443 ice_sysctl_request_reset, "A", 5444 ICE_SYSCTL_HELP_REQUEST_RESET); 5445 5446 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "pfr_count", CTLFLAG_RD, 5447 &sc->soft_stats.pfr_count, 0, "# of PF resets handled"); 5448 5449 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "corer_count", CTLFLAG_RD, 5450 &sc->soft_stats.corer_count, 0, "# of CORE resets handled"); 5451 5452 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "globr_count", CTLFLAG_RD, 5453 &sc->soft_stats.globr_count, 0, "# of Global resets handled"); 5454 5455 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "empr_count", CTLFLAG_RD, 5456 &sc->soft_stats.empr_count, 0, "# of EMP resets handled"); 5457 5458 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "tx_mdd_count", CTLFLAG_RD, 5459 &sc->soft_stats.tx_mdd_count, 0, "# of Tx MDD events detected"); 5460 5461 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "rx_mdd_count", CTLFLAG_RD, 5462 &sc->soft_stats.rx_mdd_count, 0, "# of Rx MDD events detected"); 5463 5464 SYSCTL_ADD_PROC(ctx, debug_list, 5465 OID_AUTO, "state", CTLTYPE_STRING | CTLFLAG_RD, 5466 sc, 0, ice_sysctl_dump_state_flags, "A", "Driver State Flags"); 5467 5468 SYSCTL_ADD_PROC(ctx, debug_list, 5469 OID_AUTO, "phy_type_low", CTLTYPE_U64 | CTLFLAG_RW, 5470 sc, 0, ice_sysctl_phy_type_low, "QU", 5471 "PHY type Low from Get PHY Caps/Set PHY Cfg"); 5472 5473 SYSCTL_ADD_PROC(ctx, debug_list, 5474 OID_AUTO, "phy_type_high", CTLTYPE_U64 | CTLFLAG_RW, 5475 sc, 0, ice_sysctl_phy_type_high, "QU", 5476 "PHY type High from Get PHY Caps/Set PHY Cfg"); 5477 5478 SYSCTL_ADD_PROC(ctx, debug_list, 5479 OID_AUTO, "phy_sw_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5480 sc, 0, ice_sysctl_phy_sw_caps, "", 5481 "Get PHY Capabilities (Software configuration)"); 5482 5483 SYSCTL_ADD_PROC(ctx, debug_list, 5484 OID_AUTO, "phy_nvm_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5485 
sc, 0, ice_sysctl_phy_nvm_caps, "", 5486 "Get PHY Capabilities (NVM configuration)"); 5487 5488 SYSCTL_ADD_PROC(ctx, debug_list, 5489 OID_AUTO, "phy_topo_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5490 sc, 0, ice_sysctl_phy_topo_caps, "", 5491 "Get PHY Capabilities (Topology configuration)"); 5492 5493 SYSCTL_ADD_PROC(ctx, debug_list, 5494 OID_AUTO, "phy_link_status", CTLTYPE_STRUCT | CTLFLAG_RD, 5495 sc, 0, ice_sysctl_phy_link_status, "", 5496 "Get PHY Link Status"); 5497 5498 SYSCTL_ADD_PROC(ctx, debug_list, 5499 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD, 5500 sc, 0, ice_sysctl_read_i2c_diag_data, "A", 5501 "Dump selected diagnostic data from FW"); 5502 5503 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "fw_build", CTLFLAG_RD, 5504 &sc->hw.fw_build, 0, "FW Build ID"); 5505 5506 SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "os_ddp_version", CTLTYPE_STRING | CTLFLAG_RD, 5507 sc, 0, ice_sysctl_os_pkg_version, "A", 5508 "DDP package name and version found in ice_ddp"); 5509 5510 SYSCTL_ADD_PROC(ctx, debug_list, 5511 OID_AUTO, "cur_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, 5512 sc, 0, ice_sysctl_fw_cur_lldp_persist_status, "A", "Current LLDP persistent status"); 5513 5514 SYSCTL_ADD_PROC(ctx, debug_list, 5515 OID_AUTO, "dflt_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, 5516 sc, 0, ice_sysctl_fw_dflt_lldp_persist_status, "A", "Default LLDP persistent status"); 5517 5518 SYSCTL_ADD_PROC(ctx, debug_list, 5519 OID_AUTO, "negotiated_fc", CTLTYPE_STRING | CTLFLAG_RD, 5520 sc, 0, ice_sysctl_negotiated_fc, "A", "Current Negotiated Flow Control mode"); 5521 5522 sw_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "switch", 5523 CTLFLAG_RD, NULL, "Switch Configuration"); 5524 sw_list = SYSCTL_CHILDREN(sw_node); 5525 5526 SYSCTL_ADD_PROC(ctx, sw_list, 5527 OID_AUTO, "mac_filters", CTLTYPE_STRING | CTLFLAG_RD, 5528 sc, 0, ice_sysctl_dump_mac_filters, "A", "MAC Filters"); 5529 5530 SYSCTL_ADD_PROC(ctx, sw_list, 5531 OID_AUTO, "vlan_filters", CTLTYPE_STRING | CTLFLAG_RD, 5532 sc, 0, ice_sysctl_dump_vlan_filters, "A", "VLAN Filters"); 5533 5534 SYSCTL_ADD_PROC(ctx, sw_list, 5535 OID_AUTO, "ethertype_filters", CTLTYPE_STRING | CTLFLAG_RD, 5536 sc, 0, ice_sysctl_dump_ethertype_filters, "A", "Ethertype Filters"); 5537 5538 SYSCTL_ADD_PROC(ctx, sw_list, 5539 OID_AUTO, "ethertype_mac_filters", CTLTYPE_STRING | CTLFLAG_RD, 5540 sc, 0, ice_sysctl_dump_ethertype_mac_filters, "A", "Ethertype/MAC Filters"); 5541 5542 } 5543 5544 /** 5545 * ice_vsi_disable_tx - Disable (unconfigure) Tx queues for a VSI 5546 * @vsi: the VSI to disable 5547 * 5548 * Disables the Tx queues associated with this VSI. Essentially the opposite 5549 * of ice_cfg_vsi_for_tx. 
5550 */ 5551 int 5552 ice_vsi_disable_tx(struct ice_vsi *vsi) 5553 { 5554 struct ice_softc *sc = vsi->sc; 5555 struct ice_hw *hw = &sc->hw; 5556 enum ice_status status; 5557 u32 *q_teids; 5558 u16 *q_ids, *q_handles; 5559 int i, err = 0; 5560 5561 if (vsi->num_tx_queues > 255) 5562 return (ENOSYS); 5563 5564 q_teids = (u32 *)malloc(sizeof(*q_teids) * vsi->num_tx_queues, 5565 M_ICE, M_NOWAIT|M_ZERO); 5566 if (!q_teids) 5567 return (ENOMEM); 5568 5569 q_ids = (u16 *)malloc(sizeof(*q_ids) * vsi->num_tx_queues, 5570 M_ICE, M_NOWAIT|M_ZERO); 5571 if (!q_ids) { 5572 err = (ENOMEM); 5573 goto free_q_teids; 5574 } 5575 5576 q_handles = (u16 *)malloc(sizeof(*q_handles) * vsi->num_tx_queues, 5577 M_ICE, M_NOWAIT|M_ZERO); 5578 if (!q_handles) { 5579 err = (ENOMEM); 5580 goto free_q_ids; 5581 } 5582 5583 5584 for (i = 0; i < vsi->num_tx_queues; i++) { 5585 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 5586 5587 q_ids[i] = vsi->tx_qmap[i]; 5588 q_handles[i] = i; 5589 q_teids[i] = txq->q_teid; 5590 } 5591 5592 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, vsi->num_tx_queues, 5593 q_handles, q_ids, q_teids, ICE_NO_RESET, 0, NULL); 5594 if (status == ICE_ERR_DOES_NOT_EXIST) { 5595 ; /* Queues have already been disabled, no need to report this as an error */ 5596 } else if (status == ICE_ERR_RESET_ONGOING) { 5597 device_printf(sc->dev, 5598 "Reset in progress. LAN Tx queues already disabled\n"); 5599 } else if (status) { 5600 device_printf(sc->dev, 5601 "Failed to disable LAN Tx queues: err %s aq_err %s\n", 5602 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5603 err = (ENODEV); 5604 } 5605 5606 /* free_q_handles: */ 5607 free(q_handles, M_ICE); 5608 free_q_ids: 5609 free(q_ids, M_ICE); 5610 free_q_teids: 5611 free(q_teids, M_ICE); 5612 5613 return err; 5614 } 5615 5616 /** 5617 * ice_vsi_set_rss_params - Set the RSS parameters for the VSI 5618 * @vsi: the VSI to configure 5619 * 5620 * Sets the RSS table size and lookup table type for the VSI based on its 5621 * VSI type. 5622 */ 5623 static void 5624 ice_vsi_set_rss_params(struct ice_vsi *vsi) 5625 { 5626 struct ice_softc *sc = vsi->sc; 5627 struct ice_hw_common_caps *cap; 5628 5629 cap = &sc->hw.func_caps.common_cap; 5630 5631 switch (vsi->type) { 5632 case ICE_VSI_PF: 5633 /* The PF VSI inherits RSS instance of the PF */ 5634 vsi->rss_table_size = cap->rss_table_size; 5635 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 5636 break; 5637 case ICE_VSI_VF: 5638 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; 5639 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; 5640 break; 5641 default: 5642 device_printf(sc->dev, 5643 "VSI %d: RSS not supported for VSI type %d\n", 5644 vsi->idx, vsi->type); 5645 break; 5646 } 5647 } 5648 5649 /** 5650 * ice_vsi_add_txqs_ctx - Create a sysctl context and node to store txq sysctls 5651 * @vsi: The VSI to add the context for 5652 * 5653 * Creates a sysctl context for storing txq sysctls. Additionally creates 5654 * a node rooted at the given VSI's main sysctl node. This context will be 5655 * used to store per-txq sysctls which may need to be released during the 5656 * driver's lifetime. 
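 *
 * A minimal sketch of the intended ordering, inferred from the helpers in
 * this file (the actual call sites live in the driver's attach and queue
 * setup/teardown paths, which are not shown here):
 *
 *	ice_vsi_add_txqs_ctx(vsi);	// once, after vsi->vsi_node exists
 *	for (i = 0; i < vsi->num_tx_queues; i++)
 *		ice_add_txq_sysctls(&vsi->tx_queues[i]);
 *	// ... queues in service ...
 *	ice_vsi_del_txqs_ctx(vsi);	// before freeing vsi->tx_queues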
5657 */ 5658 void 5659 ice_vsi_add_txqs_ctx(struct ice_vsi *vsi) 5660 { 5661 struct sysctl_oid_list *vsi_list; 5662 5663 sysctl_ctx_init(&vsi->txqs_ctx); 5664 5665 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 5666 5667 vsi->txqs_node = SYSCTL_ADD_NODE(&vsi->txqs_ctx, vsi_list, OID_AUTO, "txqs", 5668 CTLFLAG_RD, NULL, "Tx Queues"); 5669 } 5670 5671 /** 5672 * ice_vsi_add_rxqs_ctx - Create a sysctl context and node to store rxq sysctls 5673 * @vsi: The VSI to add the context for 5674 * 5675 * Creates a sysctl context for storing rxq sysctls. Additionally creates 5676 * a node rooted at the given VSI's main sysctl node. This context will be 5677 * used to store per-rxq sysctls which may need to be released during the 5678 * driver's lifetime. 5679 */ 5680 void 5681 ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi) 5682 { 5683 struct sysctl_oid_list *vsi_list; 5684 5685 sysctl_ctx_init(&vsi->rxqs_ctx); 5686 5687 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 5688 5689 vsi->rxqs_node = SYSCTL_ADD_NODE(&vsi->rxqs_ctx, vsi_list, OID_AUTO, "rxqs", 5690 CTLFLAG_RD, NULL, "Rx Queues"); 5691 } 5692 5693 /** 5694 * ice_vsi_del_txqs_ctx - Delete the Tx queue sysctl context for this VSI 5695 * @vsi: The VSI to delete from 5696 * 5697 * Frees the txq sysctl context created for storing the per-queue Tx sysctls. 5698 * Must be called prior to freeing the Tx queue memory, in order to avoid 5699 * having sysctls point at stale memory. 5700 */ 5701 void 5702 ice_vsi_del_txqs_ctx(struct ice_vsi *vsi) 5703 { 5704 device_t dev = vsi->sc->dev; 5705 int err; 5706 5707 if (vsi->txqs_node) { 5708 err = sysctl_ctx_free(&vsi->txqs_ctx); 5709 if (err) 5710 device_printf(dev, "failed to free VSI %d txqs_ctx, err %s\n", 5711 vsi->idx, ice_err_str(err)); 5712 vsi->txqs_node = NULL; 5713 } 5714 } 5715 5716 /** 5717 * ice_vsi_del_rxqs_ctx - Delete the Rx queue sysctl context for this VSI 5718 * @vsi: The VSI to delete from 5719 * 5720 * Frees the rxq sysctl context created for storing the per-queue Rx sysctls. 5721 * Must be called prior to freeing the Rx queue memory, in order to avoid 5722 * having sysctls point at stale memory. 5723 */ 5724 void 5725 ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi) 5726 { 5727 device_t dev = vsi->sc->dev; 5728 int err; 5729 5730 if (vsi->rxqs_node) { 5731 err = sysctl_ctx_free(&vsi->rxqs_ctx); 5732 if (err) 5733 device_printf(dev, "failed to free VSI %d rxqs_ctx, err %s\n", 5734 vsi->idx, ice_err_str(err)); 5735 vsi->rxqs_node = NULL; 5736 } 5737 } 5738 5739 /** 5740 * ice_add_txq_sysctls - Add per-queue sysctls for a Tx queue 5741 * @txq: pointer to the Tx queue 5742 * 5743 * Add per-queue sysctls for a given Tx queue. Can't be called during 5744 * ice_add_vsi_sysctls, since the queue memory has not yet been setup. 
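 *
 * The statistics are registered from a sentinel-terminated table of
 * struct ice_sysctl_info entries, so adding a counter is a one-line change.
 * A hedged sketch (the "tso_packets" member below is hypothetical, not an
 * existing field of txq->stats):
 *
 *	const struct ice_sysctl_info ctls[] = {
 *		{ &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" },
 *		{ &txq->stats.tso_packets, "tso_packets", "Queue TSO Packets Sent" },
 *		{ 0, 0, 0 }	// sentinel entry stops the registration loop
 *	};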
5745 */ 5746 void 5747 ice_add_txq_sysctls(struct ice_tx_queue *txq) 5748 { 5749 struct ice_vsi *vsi = txq->vsi; 5750 struct sysctl_ctx_list *ctx = &vsi->txqs_ctx; 5751 struct sysctl_oid_list *txqs_list, *this_txq_list; 5752 struct sysctl_oid *txq_node; 5753 char txq_name[32], txq_desc[32]; 5754 5755 const struct ice_sysctl_info ctls[] = { 5756 { &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" }, 5757 { &txq->stats.tx_bytes, "tx_bytes", "Queue Bytes Transmitted" }, 5758 { &txq->stats.mss_too_small, "mss_too_small", "TSO sends with an MSS less than 64" }, 5759 { 0, 0, 0 } 5760 }; 5761 5762 const struct ice_sysctl_info *entry = ctls; 5763 5764 txqs_list = SYSCTL_CHILDREN(vsi->txqs_node); 5765 5766 snprintf(txq_name, sizeof(txq_name), "%u", txq->me); 5767 snprintf(txq_desc, sizeof(txq_desc), "Tx Queue %u", txq->me); 5768 txq_node = SYSCTL_ADD_NODE(ctx, txqs_list, OID_AUTO, txq_name, 5769 CTLFLAG_RD, NULL, txq_desc); 5770 this_txq_list = SYSCTL_CHILDREN(txq_node); 5771 5772 /* Add the Tx queue statistics */ 5773 while (entry->stat != 0) { 5774 SYSCTL_ADD_U64(ctx, this_txq_list, OID_AUTO, entry->name, 5775 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 5776 entry->description); 5777 entry++; 5778 } 5779 } 5780 5781 /** 5782 * ice_add_rxq_sysctls - Add per-queue sysctls for an Rx queue 5783 * @rxq: pointer to the Rx queue 5784 * 5785 * Add per-queue sysctls for a given Rx queue. Can't be called during 5786 * ice_add_vsi_sysctls, since the queue memory has not yet been setup. 5787 */ 5788 void 5789 ice_add_rxq_sysctls(struct ice_rx_queue *rxq) 5790 { 5791 struct ice_vsi *vsi = rxq->vsi; 5792 struct sysctl_ctx_list *ctx = &vsi->rxqs_ctx; 5793 struct sysctl_oid_list *rxqs_list, *this_rxq_list; 5794 struct sysctl_oid *rxq_node; 5795 char rxq_name[32], rxq_desc[32]; 5796 5797 const struct ice_sysctl_info ctls[] = { 5798 { &rxq->stats.rx_packets, "rx_packets", "Queue Packets Received" }, 5799 { &rxq->stats.rx_bytes, "rx_bytes", "Queue Bytes Received" }, 5800 { &rxq->stats.desc_errs, "rx_desc_errs", "Queue Rx Descriptor Errors" }, 5801 { 0, 0, 0 } 5802 }; 5803 5804 const struct ice_sysctl_info *entry = ctls; 5805 5806 rxqs_list = SYSCTL_CHILDREN(vsi->rxqs_node); 5807 5808 snprintf(rxq_name, sizeof(rxq_name), "%u", rxq->me); 5809 snprintf(rxq_desc, sizeof(rxq_desc), "Rx Queue %u", rxq->me); 5810 rxq_node = SYSCTL_ADD_NODE(ctx, rxqs_list, OID_AUTO, rxq_name, 5811 CTLFLAG_RD, NULL, rxq_desc); 5812 this_rxq_list = SYSCTL_CHILDREN(rxq_node); 5813 5814 /* Add the Rx queue statistics */ 5815 while (entry->stat != 0) { 5816 SYSCTL_ADD_U64(ctx, this_rxq_list, OID_AUTO, entry->name, 5817 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 5818 entry->description); 5819 entry++; 5820 } 5821 } 5822 5823 /** 5824 * ice_get_default_rss_key - Obtain a default RSS key 5825 * @seed: storage for the RSS key data 5826 * 5827 * Copies a pre-generated RSS key into the seed memory. The seed pointer must 5828 * point to a block of memory that is at least 40 bytes in size. 5829 * 5830 * The key isn't randomly generated each time this function is called because 5831 * that makes the RSS key change every time we reconfigure RSS. This does mean 5832 * that we're hard coding a possibly 'well known' key. We might want to 5833 * investigate randomly generating this key once during the first call. 
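 *
 * A hedged sketch of that idea (not current driver behavior; locking is
 * ignored for brevity): generate the key once with the kernel RNG and reuse
 * it, so reconfiguring RSS does not reshuffle established flows.
 *
 *	static u8 cached_seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
 *	static bool seed_generated = false;
 *
 *	if (!seed_generated) {
 *		arc4rand(cached_seed, sizeof(cached_seed), 0);
 *		seed_generated = true;
 *	}
 *	bcopy(cached_seed, seed, sizeof(cached_seed));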
5834 */ 5835 static void 5836 ice_get_default_rss_key(u8 *seed) 5837 { 5838 const u8 default_seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE] = { 5839 0x39, 0xed, 0xff, 0x4d, 0x43, 0x58, 0x42, 0xc3, 0x5f, 0xb8, 5840 0xa5, 0x32, 0x95, 0x65, 0x81, 0xcd, 0x36, 0x79, 0x71, 0x97, 5841 0xde, 0xa4, 0x41, 0x40, 0x6f, 0x27, 0xe9, 0x81, 0x13, 0xa0, 5842 0x95, 0x93, 0x5b, 0x1e, 0x9d, 0x27, 0x9d, 0x24, 0x84, 0xb5, 5843 }; 5844 5845 bcopy(default_seed, seed, ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); 5846 } 5847 5848 /** 5849 * ice_set_rss_key - Configure a given VSI with the default RSS key 5850 * @vsi: the VSI to configure 5851 * 5852 * Program the hardware RSS key. We use rss_getkey to grab the kernel RSS key. 5853 * If the kernel RSS interface is not available, this will fall back to our 5854 * pre-generated hash seed from ice_get_default_rss_key(). 5855 */ 5856 static int 5857 ice_set_rss_key(struct ice_vsi *vsi) 5858 { 5859 struct ice_aqc_get_set_rss_keys keydata = { .standard_rss_key = {0} }; 5860 struct ice_softc *sc = vsi->sc; 5861 struct ice_hw *hw = &sc->hw; 5862 enum ice_status status; 5863 5864 /* 5865 * If the RSS kernel interface is disabled, this will return the 5866 * default RSS key above. 5867 */ 5868 rss_getkey(keydata.standard_rss_key); 5869 5870 status = ice_aq_set_rss_key(hw, vsi->idx, &keydata); 5871 if (status) { 5872 device_printf(sc->dev, 5873 "ice_aq_set_rss_key status %s, error %s\n", 5874 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5875 return (EIO); 5876 } 5877 5878 return (0); 5879 } 5880 5881 /** 5882 * ice_set_rss_flow_flds - Program the RSS hash flows after package init 5883 * @vsi: the VSI to configure 5884 * 5885 * If the package file is initialized, the default RSS flows are reset. We 5886 * need to reprogram the expected hash configuration. We'll use 5887 * rss_gethashconfig() to determine which flows to enable. If RSS kernel 5888 * support is not enabled, this macro will fall back to suitable defaults. 
5889 */ 5890 static void 5891 ice_set_rss_flow_flds(struct ice_vsi *vsi) 5892 { 5893 struct ice_softc *sc = vsi->sc; 5894 struct ice_hw *hw = &sc->hw; 5895 device_t dev = sc->dev; 5896 enum ice_status status; 5897 u_int rss_hash_config; 5898 5899 rss_hash_config = rss_gethashconfig(); 5900 5901 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) { 5902 status = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, 5903 ICE_FLOW_SEG_HDR_IPV4); 5904 if (status) 5905 device_printf(dev, 5906 "ice_add_rss_cfg on VSI %d failed for ipv4 flow, err %s aq_err %s\n", 5907 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5908 } 5909 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) { 5910 status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, 5911 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); 5912 if (status) 5913 device_printf(dev, 5914 "ice_add_rss_cfg on VSI %d failed for tcp4 flow, err %s aq_err %s\n", 5915 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5916 } 5917 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) { 5918 status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, 5919 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); 5920 if (status) 5921 device_printf(dev, 5922 "ice_add_rss_cfg on VSI %d failed for udp4 flow, err %s aq_err %s\n", 5923 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5924 } 5925 if (rss_hash_config & (RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_IPV6_EX)) { 5926 status = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, 5927 ICE_FLOW_SEG_HDR_IPV6); 5928 if (status) 5929 device_printf(dev, 5930 "ice_add_rss_cfg on VSI %d failed for ipv6 flow, err %s aq_err %s\n", 5931 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5932 } 5933 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) { 5934 status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, 5935 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); 5936 if (status) 5937 device_printf(dev, 5938 "ice_add_rss_cfg on VSI %d failed for tcp6 flow, err %s aq_err %s\n", 5939 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5940 } 5941 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) { 5942 status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, 5943 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); 5944 if (status) 5945 device_printf(dev, 5946 "ice_add_rss_cfg on VSI %d failed for udp6 flow, err %s aq_err %s\n", 5947 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5948 } 5949 5950 /* Warn about RSS hash types which are not supported */ 5951 /* coverity[dead_error_condition] */ 5952 if (rss_hash_config & ~ICE_DEFAULT_RSS_HASH_CONFIG) { 5953 device_printf(dev, 5954 "ice_add_rss_cfg on VSI %d could not configure every requested hash type\n", 5955 vsi->idx); 5956 } 5957 } 5958 5959 /** 5960 * ice_set_rss_lut - Program the RSS lookup table for a VSI 5961 * @vsi: the VSI to configure 5962 * 5963 * Programs the RSS lookup table for a given VSI. We use 5964 * rss_get_indirection_to_bucket which will use the indirection table provided 5965 * by the kernel RSS interface when available. If the kernel RSS interface is 5966 * not available, we will fall back to a simple round-robin fashion queue 5967 * assignment. 
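 *
 * As a small worked example: assuming the non-RSS fallback where
 * rss_get_indirection_to_bucket(i) simply returns i, a VSI with 4 Rx queues
 * and an 8-entry table ends up with
 *
 *	lut[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
 *
 * so hash results are spread across the queues in round-robin order.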
5968 */ 5969 static int 5970 ice_set_rss_lut(struct ice_vsi *vsi) 5971 { 5972 struct ice_softc *sc = vsi->sc; 5973 struct ice_hw *hw = &sc->hw; 5974 device_t dev = sc->dev; 5975 enum ice_status status; 5976 int i, err = 0; 5977 u8 *lut; 5978 5979 lut = (u8 *)malloc(vsi->rss_table_size, M_ICE, M_NOWAIT|M_ZERO); 5980 if (!lut) { 5981 device_printf(dev, "Failed to allocate RSS lut memory\n"); 5982 return (ENOMEM); 5983 } 5984 5985 /* Populate the LUT with max no. of queues. If the RSS kernel 5986 * interface is disabled, this will assign the lookup table in 5987 * a simple round robin fashion 5988 */ 5989 for (i = 0; i < vsi->rss_table_size; i++) { 5990 /* XXX: this needs to be changed if num_rx_queues ever counts 5991 * more than just the RSS queues */ 5992 lut[i] = rss_get_indirection_to_bucket(i) % vsi->num_rx_queues; 5993 } 5994 5995 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 5996 lut, vsi->rss_table_size); 5997 if (status) { 5998 device_printf(dev, 5999 "Cannot set RSS lut, err %s aq_err %s\n", 6000 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6001 err = (EIO); 6002 } 6003 6004 free(lut, M_ICE); 6005 return err; 6006 } 6007 6008 /** 6009 * ice_config_rss - Configure RSS for a VSI 6010 * @vsi: the VSI to configure 6011 * 6012 * If FEATURE_RSS is enabled, configures the RSS lookup table and hash key for 6013 * a given VSI. 6014 */ 6015 int 6016 ice_config_rss(struct ice_vsi *vsi) 6017 { 6018 int err; 6019 6020 /* Nothing to do, if RSS is not enabled */ 6021 if (!ice_is_bit_set(vsi->sc->feat_en, ICE_FEATURE_RSS)) 6022 return 0; 6023 6024 err = ice_set_rss_key(vsi); 6025 if (err) 6026 return err; 6027 6028 ice_set_rss_flow_flds(vsi); 6029 6030 return ice_set_rss_lut(vsi); 6031 } 6032 6033 /** 6034 * ice_log_pkg_init - Log a message about status of DDP initialization 6035 * @sc: the device softc pointer 6036 * @pkg_status: the status result of ice_copy_and_init_pkg 6037 * 6038 * Called by ice_load_pkg after an attempt to download the DDP package 6039 * contents to the device. Determines whether the download was successful or 6040 * not and logs an appropriate message for the system administrator. 6041 * 6042 * @post if a DDP package was previously downloaded on another port and it 6043 * is not compatible with this driver, pkg_status will be updated to reflect 6044 * this, and the driver will transition to safe mode. 6045 */ 6046 void 6047 ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status) 6048 { 6049 struct ice_hw *hw = &sc->hw; 6050 device_t dev = sc->dev; 6051 struct sbuf *active_pkg, *os_pkg; 6052 6053 active_pkg = sbuf_new_auto(); 6054 ice_active_pkg_version_str(hw, active_pkg); 6055 sbuf_finish(active_pkg); 6056 6057 os_pkg = sbuf_new_auto(); 6058 ice_os_pkg_version_str(hw, os_pkg); 6059 sbuf_finish(os_pkg); 6060 6061 switch (*pkg_status) { 6062 case ICE_SUCCESS: 6063 /* The package download AdminQ command returned success because 6064 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is 6065 * already a package loaded on the device. 
6066 */ 6067 if (hw->pkg_ver.major == hw->active_pkg_ver.major && 6068 hw->pkg_ver.minor == hw->active_pkg_ver.minor && 6069 hw->pkg_ver.update == hw->active_pkg_ver.update && 6070 hw->pkg_ver.draft == hw->active_pkg_ver.draft && 6071 !memcmp(hw->pkg_name, hw->active_pkg_name, 6072 sizeof(hw->pkg_name))) { 6073 switch (hw->pkg_dwnld_status) { 6074 case ICE_AQ_RC_OK: 6075 device_printf(dev, 6076 "The DDP package was successfully loaded: %s.\n", 6077 sbuf_data(active_pkg)); 6078 break; 6079 case ICE_AQ_RC_EEXIST: 6080 device_printf(dev, 6081 "DDP package already present on device: %s.\n", 6082 sbuf_data(active_pkg)); 6083 break; 6084 default: 6085 /* We do not expect this to occur, but the 6086 * extra messaging is here in case something 6087 * changes in the ice_init_pkg flow. 6088 */ 6089 device_printf(dev, 6090 "DDP package already present on device: %s. An unexpected error occurred, pkg_dwnld_status %s.\n", 6091 sbuf_data(active_pkg), 6092 ice_aq_str(hw->pkg_dwnld_status)); 6093 break; 6094 } 6095 } else if (pkg_ver_compatible(&hw->active_pkg_ver) == 0) { 6096 device_printf(dev, 6097 "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n", 6098 sbuf_data(active_pkg), 6099 sbuf_data(os_pkg)); 6100 } else if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { 6101 device_printf(dev, 6102 "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6103 sbuf_data(active_pkg), 6104 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6105 *pkg_status = ICE_ERR_NOT_SUPPORTED; 6106 } else { 6107 device_printf(dev, 6108 "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6109 sbuf_data(active_pkg), 6110 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6111 *pkg_status = ICE_ERR_NOT_SUPPORTED; 6112 } 6113 break; 6114 case ICE_ERR_NOT_SUPPORTED: 6115 /* 6116 * This assumes that the active_pkg_ver will not be 6117 * initialized if the ice_ddp package version is not 6118 * supported. 6119 */ 6120 if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { 6121 /* The ice_ddp version is not supported */ 6122 if (pkg_ver_compatible(&hw->pkg_ver) > 0) { 6123 device_printf(dev, 6124 "The DDP package in the ice_ddp module is higher than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated driver. Entering Safe Mode.\n", 6125 sbuf_data(os_pkg), 6126 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6127 } else if (pkg_ver_compatible(&hw->pkg_ver) < 0) { 6128 device_printf(dev, 6129 "The DDP package in the ice_ddp module is lower than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated ice_ddp module. Entering Safe Mode.\n", 6130 sbuf_data(os_pkg), 6131 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6132 } else { 6133 device_printf(dev, 6134 "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. 
Entering Safe Mode.\n", 6135 ice_status_str(*pkg_status), 6136 ice_aq_str(hw->pkg_dwnld_status), 6137 sbuf_data(os_pkg), 6138 sbuf_data(active_pkg), 6139 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6140 } 6141 } else { 6142 if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { 6143 device_printf(dev, 6144 "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6145 sbuf_data(active_pkg), 6146 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6147 } else if (pkg_ver_compatible(&hw->active_pkg_ver) < 0) { 6148 device_printf(dev, 6149 "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6150 sbuf_data(active_pkg), 6151 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6152 } else { 6153 device_printf(dev, 6154 "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6155 ice_status_str(*pkg_status), 6156 ice_aq_str(hw->pkg_dwnld_status), 6157 sbuf_data(os_pkg), 6158 sbuf_data(active_pkg), 6159 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6160 } 6161 } 6162 break; 6163 case ICE_ERR_CFG: 6164 case ICE_ERR_BUF_TOO_SHORT: 6165 case ICE_ERR_PARAM: 6166 device_printf(dev, 6167 "The DDP package in the ice_ddp module is invalid. Entering Safe Mode\n"); 6168 break; 6169 case ICE_ERR_FW_DDP_MISMATCH: 6170 device_printf(dev, 6171 "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); 6172 break; 6173 case ICE_ERR_AQ_ERROR: 6174 switch (hw->pkg_dwnld_status) { 6175 case ICE_AQ_RC_ENOSEC: 6176 case ICE_AQ_RC_EBADSIG: 6177 device_printf(dev, 6178 "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n"); 6179 goto free_sbufs; 6180 case ICE_AQ_RC_ESVN: 6181 device_printf(dev, 6182 "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n"); 6183 goto free_sbufs; 6184 case ICE_AQ_RC_EBADMAN: 6185 case ICE_AQ_RC_EBADBUF: 6186 device_printf(dev, 6187 "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n"); 6188 goto free_sbufs; 6189 default: 6190 break; 6191 } 6192 /* fall-through */ 6193 default: 6194 device_printf(dev, 6195 "An unknown error (%s aq_err %s) occurred when loading the DDP package. Entering Safe Mode.\n", 6196 ice_status_str(*pkg_status), 6197 ice_aq_str(hw->pkg_dwnld_status)); 6198 break; 6199 } 6200 6201 free_sbufs: 6202 sbuf_delete(active_pkg); 6203 sbuf_delete(os_pkg); 6204 } 6205 6206 /** 6207 * ice_load_pkg_file - Load the DDP package file using firmware_get 6208 * @sc: device private softc 6209 * 6210 * Use firmware_get to load the DDP package memory and then request that 6211 * firmware download the package contents and program the relevant hardware 6212 * bits. 6213 * 6214 * This function makes a copy of the DDP package memory which is tracked in 6215 * the ice_hw structure. The copy will be managed and released by 6216 * ice_deinit_hw(). This allows the firmware reference to be immediately 6217 * released using firmware_put. 
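 *
 * The underlying firmware(9) pattern, sketched for illustration (error
 * handling and the Safe Mode fallback below are omitted):
 *
 *	const struct firmware *pkg = firmware_get("ice_ddp");
 *	if (pkg != NULL) {
 *		// ice_copy_and_init_pkg() duplicates pkg->data internally,
 *		// so the reference can be dropped immediately afterwards.
 *		ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize);
 *		firmware_put(pkg, FIRMWARE_UNLOAD);
 *	}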
6218 */ 6219 void 6220 ice_load_pkg_file(struct ice_softc *sc) 6221 { 6222 struct ice_hw *hw = &sc->hw; 6223 device_t dev = sc->dev; 6224 enum ice_status status; 6225 const struct firmware *pkg; 6226 6227 pkg = firmware_get("ice_ddp"); 6228 if (!pkg) { 6229 device_printf(dev, "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n"); 6230 if (cold) 6231 device_printf(dev, 6232 "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n"); 6233 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); 6234 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); 6235 return; 6236 } 6237 6238 /* Copy and download the pkg contents */ 6239 status = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize); 6240 6241 /* Release the firmware reference */ 6242 firmware_put(pkg, FIRMWARE_UNLOAD); 6243 6244 /* Check the active DDP package version and log a message */ 6245 ice_log_pkg_init(sc, &status); 6246 6247 /* Place the driver into safe mode */ 6248 if (status != ICE_SUCCESS) { 6249 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); 6250 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); 6251 } 6252 } 6253 6254 /** 6255 * ice_get_ifnet_counter - Retrieve counter value for a given ifnet counter 6256 * @vsi: the vsi to retrieve the value for 6257 * @counter: the counter type to retrieve 6258 * 6259 * Returns the value for a given ifnet counter. To do so, we calculate the 6260 * value based on the matching hardware statistics. 6261 */ 6262 uint64_t 6263 ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter) 6264 { 6265 struct ice_hw_port_stats *hs = &vsi->sc->stats.cur; 6266 struct ice_eth_stats *es = &vsi->hw_stats.cur; 6267 6268 /* For some statistics, especially those related to error flows, we do 6269 * not have per-VSI counters. In this case, we just report the global 6270 * counters. 6271 */ 6272 6273 switch (counter) { 6274 case IFCOUNTER_IPACKETS: 6275 return (es->rx_unicast + es->rx_multicast + es->rx_broadcast); 6276 case IFCOUNTER_IERRORS: 6277 return (hs->crc_errors + hs->illegal_bytes + 6278 hs->mac_local_faults + hs->mac_remote_faults + 6279 hs->rx_len_errors + hs->rx_undersize + 6280 hs->rx_oversize + hs->rx_fragments + hs->rx_jabber); 6281 case IFCOUNTER_OPACKETS: 6282 return (es->tx_unicast + es->tx_multicast + es->tx_broadcast); 6283 case IFCOUNTER_OERRORS: 6284 return (es->tx_errors); 6285 case IFCOUNTER_COLLISIONS: 6286 return (0); 6287 case IFCOUNTER_IBYTES: 6288 return (es->rx_bytes); 6289 case IFCOUNTER_OBYTES: 6290 return (es->tx_bytes); 6291 case IFCOUNTER_IMCASTS: 6292 return (es->rx_multicast); 6293 case IFCOUNTER_OMCASTS: 6294 return (es->tx_multicast); 6295 case IFCOUNTER_IQDROPS: 6296 return (es->rx_discards); 6297 case IFCOUNTER_OQDROPS: 6298 return (hs->tx_dropped_link_down); 6299 case IFCOUNTER_NOPROTO: 6300 return (es->rx_unknown_protocol); 6301 default: 6302 return if_get_counter_default(vsi->sc->ifp, counter); 6303 } 6304 } 6305 6306 /** 6307 * ice_save_pci_info - Save PCI configuration fields in HW struct 6308 * @hw: the ice_hw struct to save the PCI information in 6309 * @dev: the device to get the PCI information from 6310 * 6311 * This should only be called once, early in the device attach 6312 * process. 
6313 */ 6314 void 6315 ice_save_pci_info(struct ice_hw *hw, device_t dev) 6316 { 6317 hw->vendor_id = pci_get_vendor(dev); 6318 hw->device_id = pci_get_device(dev); 6319 hw->subsystem_vendor_id = pci_get_subvendor(dev); 6320 hw->subsystem_device_id = pci_get_subdevice(dev); 6321 hw->revision_id = pci_get_revid(dev); 6322 hw->bus.device = pci_get_slot(dev); 6323 hw->bus.func = pci_get_function(dev); 6324 } 6325 6326 /** 6327 * ice_replay_all_vsi_cfg - Replay configuration for all VSIs after reset 6328 * @sc: the device softc 6329 * 6330 * Replay the configuration for each VSI, and then cleanup replay 6331 * information. Called after a hardware reset in order to reconfigure the 6332 * active VSIs. 6333 */ 6334 int 6335 ice_replay_all_vsi_cfg(struct ice_softc *sc) 6336 { 6337 struct ice_hw *hw = &sc->hw; 6338 enum ice_status status; 6339 int i; 6340 6341 for (i = 0; i < sc->num_available_vsi; i++) { 6342 struct ice_vsi *vsi = sc->all_vsi[i]; 6343 6344 if (!vsi) 6345 continue; 6346 6347 status = ice_replay_vsi(hw, vsi->idx); 6348 if (status) { 6349 device_printf(sc->dev, "Failed to replay VSI %d, err %s aq_err %s\n", 6350 vsi->idx, ice_status_str(status), 6351 ice_aq_str(hw->adminq.sq_last_status)); 6352 return (EIO); 6353 } 6354 } 6355 6356 /* Cleanup replay filters after successful reconfiguration */ 6357 ice_replay_post(hw); 6358 return (0); 6359 } 6360 6361 /** 6362 * ice_clean_vsi_rss_cfg - Cleanup RSS configuration for a given VSI 6363 * @vsi: pointer to the VSI structure 6364 * 6365 * Cleanup the advanced RSS configuration for a given VSI. This is necessary 6366 * during driver removal to ensure that all RSS resources are properly 6367 * released. 6368 * 6369 * @remark this function doesn't report an error as it is expected to be 6370 * called during driver reset and unload, and there isn't much the driver can 6371 * do if freeing RSS resources fails. 6372 */ 6373 static void 6374 ice_clean_vsi_rss_cfg(struct ice_vsi *vsi) 6375 { 6376 struct ice_softc *sc = vsi->sc; 6377 struct ice_hw *hw = &sc->hw; 6378 device_t dev = sc->dev; 6379 enum ice_status status; 6380 6381 status = ice_rem_vsi_rss_cfg(hw, vsi->idx); 6382 if (status) 6383 device_printf(dev, 6384 "Failed to remove RSS configuration for VSI %d, err %s\n", 6385 vsi->idx, ice_status_str(status)); 6386 6387 /* Remove this VSI from the RSS list */ 6388 ice_rem_vsi_rss_list(hw, vsi->idx); 6389 } 6390 6391 /** 6392 * ice_clean_all_vsi_rss_cfg - Cleanup RSS configuration for all VSIs 6393 * @sc: the device softc pointer 6394 * 6395 * Cleanup the advanced RSS configuration for all VSIs on a given PF 6396 * interface. 6397 * 6398 * @remark This should be called while preparing for a reset, to cleanup stale 6399 * RSS configuration for all VSIs. 6400 */ 6401 void 6402 ice_clean_all_vsi_rss_cfg(struct ice_softc *sc) 6403 { 6404 int i; 6405 6406 /* No need to cleanup if RSS is not enabled */ 6407 if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) 6408 return; 6409 6410 for (i = 0; i < sc->num_available_vsi; i++) { 6411 struct ice_vsi *vsi = sc->all_vsi[i]; 6412 6413 if (vsi) 6414 ice_clean_vsi_rss_cfg(vsi); 6415 } 6416 } 6417 6418 /** 6419 * ice_requested_fec_mode - Return the requested FEC mode as a string 6420 * @pi: The port info structure 6421 * 6422 * Return a string representing the requested FEC mode.
6423 */ 6424 static const char * 6425 ice_requested_fec_mode(struct ice_port_info *pi) 6426 { 6427 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 6428 enum ice_status status; 6429 6430 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 6431 &pcaps, NULL); 6432 if (status) 6433 /* Just report unknown if we can't get capabilities */ 6434 return "Unknown"; 6435 6436 /* Check if RS-FEC has been requested first */ 6437 if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 6438 ICE_AQC_PHY_FEC_25G_RS_544_REQ)) 6439 return ice_fec_str(ICE_FEC_RS); 6440 6441 /* If RS FEC has not been requested, then check BASE-R */ 6442 if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 6443 ICE_AQC_PHY_FEC_25G_KR_REQ)) 6444 return ice_fec_str(ICE_FEC_BASER); 6445 6446 return ice_fec_str(ICE_FEC_NONE); 6447 } 6448 6449 /** 6450 * ice_negotiated_fec_mode - Return the negotiated FEC mode as a string 6451 * @pi: The port info structure 6452 * 6453 * Return a string representing the current FEC mode. 6454 */ 6455 static const char * 6456 ice_negotiated_fec_mode(struct ice_port_info *pi) 6457 { 6458 /* Check if RS-FEC has been negotiated first */ 6459 if (pi->phy.link_info.fec_info & (ICE_AQ_LINK_25G_RS_528_FEC_EN | 6460 ICE_AQ_LINK_25G_RS_544_FEC_EN)) 6461 return ice_fec_str(ICE_FEC_RS); 6462 6463 /* If RS-FEC has not been negotiated, then check BASE-R */ 6464 if (pi->phy.link_info.fec_info & ICE_AQ_LINK_25G_KR_FEC_EN) 6465 return ice_fec_str(ICE_FEC_BASER); 6466 6467 return ice_fec_str(ICE_FEC_NONE); 6468 } 6469 6470 /** 6471 * ice_autoneg_mode - Return string indicating if autoneg completed 6472 * @pi: The port info structure 6473 * 6474 * Return "True" if autonegotiation is completed, "False" otherwise. 6475 */ 6476 static const char * 6477 ice_autoneg_mode(struct ice_port_info *pi) 6478 { 6479 if (pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) 6480 return "True"; 6481 else 6482 return "False"; 6483 } 6484 6485 /** 6486 * ice_flowcontrol_mode - Return string indicating the Flow Control mode 6487 * @pi: The port info structure 6488 * 6489 * Returns the current Flow Control mode as a string. 6490 */ 6491 static const char * 6492 ice_flowcontrol_mode(struct ice_port_info *pi) 6493 { 6494 return ice_fc_str(pi->fc.current_mode); 6495 } 6496 6497 /** 6498 * ice_link_up_msg - Log a link up message with associated info 6499 * @sc: the device private softc 6500 * 6501 * Log a link up message with LOG_NOTICE message level. Include information 6502 * about the speed, duplex, FEC mode, autonegotiation and flow control. 6503 */ 6504 void 6505 ice_link_up_msg(struct ice_softc *sc) 6506 { 6507 struct ice_hw *hw = &sc->hw; 6508 struct ifnet *ifp = sc->ifp; 6509 const char *speed, *req_fec, *neg_fec, *autoneg, *flowcontrol; 6510 6511 speed = ice_aq_speed_to_str(hw->port_info); 6512 req_fec = ice_requested_fec_mode(hw->port_info); 6513 neg_fec = ice_negotiated_fec_mode(hw->port_info); 6514 autoneg = ice_autoneg_mode(hw->port_info); 6515 flowcontrol = ice_flowcontrol_mode(hw->port_info); 6516 6517 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 6518 ifp->if_xname, speed, req_fec, neg_fec, autoneg, flowcontrol); 6519 } 6520 6521 /** 6522 * ice_update_laa_mac - Update MAC address if Locally Administered 6523 * @sc: the device softc 6524 * 6525 * Update the device MAC address when a Locally Administered Address is 6526 * assigned. 6527 * 6528 * This function does *not* update the MAC filter list itself.
Instead, it 6529 * should be called after ice_rm_pf_default_mac_filters, so that the previous 6530 * address filter will be removed, and before ice_cfg_pf_default_mac_filters, 6531 * so that the new address filter will be assigned. 6532 */ 6533 int 6534 ice_update_laa_mac(struct ice_softc *sc) 6535 { 6536 const u8 *lladdr = (const u8 *)IF_LLADDR(sc->ifp); 6537 struct ice_hw *hw = &sc->hw; 6538 enum ice_status status; 6539 6540 /* If the address is the same, then there is nothing to update */ 6541 if (!memcmp(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN)) 6542 return (0); 6543 6544 /* Reject Multicast addresses */ 6545 if (ETHER_IS_MULTICAST(lladdr)) 6546 return (EINVAL); 6547 6548 status = ice_aq_manage_mac_write(hw, lladdr, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL); 6549 if (status) { 6550 device_printf(sc->dev, "Failed to write mac %6D to firmware, err %s aq_err %s\n", 6551 lladdr, ":", ice_status_str(status), 6552 ice_aq_str(hw->adminq.sq_last_status)); 6553 return (EFAULT); 6554 } 6555 6556 /* Copy the address into place of the LAN address. */ 6557 bcopy(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN); 6558 6559 return (0); 6560 } 6561 6562 /** 6563 * ice_get_and_print_bus_info - Save (PCI) bus info and print messages 6564 * @sc: device softc 6565 * 6566 * This will potentially print out a warning message if bus bandwidth 6567 * is insufficient for full-speed operation. 6568 * 6569 * This should only be called once, during the attach process, after 6570 * hw->port_info has been filled out with port link topology information 6571 * (from the Get PHY Capabilities Admin Queue command). 6572 */ 6573 void 6574 ice_get_and_print_bus_info(struct ice_softc *sc) 6575 { 6576 struct ice_hw *hw = &sc->hw; 6577 device_t dev = sc->dev; 6578 u16 pci_link_status; 6579 int offset; 6580 6581 pci_find_cap(dev, PCIY_EXPRESS, &offset); 6582 pci_link_status = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 6583 6584 /* Fill out hw struct with PCIE link status info */ 6585 ice_set_pci_link_status_data(hw, pci_link_status); 6586 6587 /* Use info to print out bandwidth messages */ 6588 ice_print_bus_link_data(dev, hw); 6589 6590 if (ice_pcie_bandwidth_check(sc)) { 6591 device_printf(dev, 6592 "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 6593 device_printf(dev, 6594 "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 6595 } 6596 } 6597 6598 /** 6599 * ice_pcie_bus_speed_to_rate - Convert driver bus speed enum value to 6600 * a 64-bit baudrate. 6601 * @speed: enum value to convert 6602 * 6603 * This only goes up to PCIE Gen 4. 6604 */ 6605 static uint64_t 6606 ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed) 6607 { 6608 /* If the PCI-E speed is Gen1 or Gen2, then report 6609 * only 80% of bus speed to account for encoding overhead. 6610 */ 6611 switch (speed) { 6612 case ice_pcie_speed_2_5GT: 6613 return IF_Gbps(2); 6614 case ice_pcie_speed_5_0GT: 6615 return IF_Gbps(4); 6616 case ice_pcie_speed_8_0GT: 6617 return IF_Gbps(8); 6618 case ice_pcie_speed_16_0GT: 6619 return IF_Gbps(16); 6620 case ice_pcie_speed_unknown: 6621 default: 6622 return 0; 6623 } 6624 } 6625 6626 /** 6627 * ice_pcie_lnk_width_to_int - Convert driver pci-e width enum value to 6628 * a 32-bit number. 
6629 * @width: enum value to convert 6630 */ 6631 static int 6632 ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width) 6633 { 6634 switch (width) { 6635 case ice_pcie_lnk_x1: 6636 return (1); 6637 case ice_pcie_lnk_x2: 6638 return (2); 6639 case ice_pcie_lnk_x4: 6640 return (4); 6641 case ice_pcie_lnk_x8: 6642 return (8); 6643 case ice_pcie_lnk_x12: 6644 return (12); 6645 case ice_pcie_lnk_x16: 6646 return (16); 6647 case ice_pcie_lnk_x32: 6648 return (32); 6649 case ice_pcie_lnk_width_resrv: 6650 case ice_pcie_lnk_width_unknown: 6651 default: 6652 return (0); 6653 } 6654 } 6655 6656 /** 6657 * ice_pcie_bandwidth_check - Check if PCI-E bandwidth is sufficient for 6658 * full-speed device operation. 6659 * @sc: adapter softc 6660 * 6661 * Returns 0 if sufficient; 1 if not. 6662 */ 6663 static uint8_t 6664 ice_pcie_bandwidth_check(struct ice_softc *sc) 6665 { 6666 struct ice_hw *hw = &sc->hw; 6667 int num_ports, pcie_width; 6668 u64 pcie_speed, port_speed; 6669 6670 MPASS(hw->port_info); 6671 6672 num_ports = bitcount32(hw->func_caps.common_cap.valid_functions); 6673 port_speed = ice_phy_types_to_max_rate(hw->port_info); 6674 pcie_speed = ice_pcie_bus_speed_to_rate(hw->bus.speed); 6675 pcie_width = ice_pcie_lnk_width_to_int(hw->bus.width); 6676 6677 /* 6678 * If 2x100, clamp ports to 1 -- 2nd port is intended for 6679 * failover. 6680 */ 6681 if (port_speed == IF_Gbps(100)) 6682 num_ports = 1; 6683 6684 return !!((num_ports * port_speed) > pcie_speed * pcie_width); 6685 } 6686 6687 /** 6688 * ice_print_bus_link_data - Print PCI-E bandwidth information 6689 * @dev: device to print string for 6690 * @hw: hw struct with PCI-e link information 6691 */ 6692 static void 6693 ice_print_bus_link_data(device_t dev, struct ice_hw *hw) 6694 { 6695 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 6696 ((hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" : 6697 (hw->bus.speed == ice_pcie_speed_8_0GT) ? "8.0GT/s" : 6698 (hw->bus.speed == ice_pcie_speed_5_0GT) ? "5.0GT/s" : 6699 (hw->bus.speed == ice_pcie_speed_2_5GT) ? "2.5GT/s" : "Unknown"), 6700 (hw->bus.width == ice_pcie_lnk_x32) ? "Width x32" : 6701 (hw->bus.width == ice_pcie_lnk_x16) ? "Width x16" : 6702 (hw->bus.width == ice_pcie_lnk_x12) ? "Width x12" : 6703 (hw->bus.width == ice_pcie_lnk_x8) ? "Width x8" : 6704 (hw->bus.width == ice_pcie_lnk_x4) ? "Width x4" : 6705 (hw->bus.width == ice_pcie_lnk_x2) ? "Width x2" : 6706 (hw->bus.width == ice_pcie_lnk_x1) ? 
"Width x1" : "Width Unknown"); 6707 } 6708 6709 /** 6710 * ice_set_pci_link_status_data - store PCI bus info 6711 * @hw: pointer to hardware structure 6712 * @link_status: the link status word from PCI config space 6713 * 6714 * Stores the PCI bus info (speed, width, type) within the ice_hw structure 6715 **/ 6716 static void 6717 ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status) 6718 { 6719 u16 reg; 6720 6721 hw->bus.type = ice_bus_pci_express; 6722 6723 reg = (link_status & PCIEM_LINK_STA_WIDTH) >> 4; 6724 6725 switch (reg) { 6726 case ice_pcie_lnk_x1: 6727 case ice_pcie_lnk_x2: 6728 case ice_pcie_lnk_x4: 6729 case ice_pcie_lnk_x8: 6730 case ice_pcie_lnk_x12: 6731 case ice_pcie_lnk_x16: 6732 case ice_pcie_lnk_x32: 6733 hw->bus.width = (enum ice_pcie_link_width)reg; 6734 break; 6735 default: 6736 hw->bus.width = ice_pcie_lnk_width_unknown; 6737 break; 6738 } 6739 6740 reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x13; 6741 6742 switch (reg) { 6743 case ice_pcie_speed_2_5GT: 6744 case ice_pcie_speed_5_0GT: 6745 case ice_pcie_speed_8_0GT: 6746 case ice_pcie_speed_16_0GT: 6747 hw->bus.speed = (enum ice_pcie_bus_speed)reg; 6748 break; 6749 default: 6750 hw->bus.speed = ice_pcie_speed_unknown; 6751 break; 6752 } 6753 } 6754 6755 /** 6756 * ice_init_link_events - Initialize Link Status Events mask 6757 * @sc: the device softc 6758 * 6759 * Initialize the Link Status Events mask to disable notification of link 6760 * events we don't care about in software. Also request that link status 6761 * events be enabled. 6762 */ 6763 int 6764 ice_init_link_events(struct ice_softc *sc) 6765 { 6766 struct ice_hw *hw = &sc->hw; 6767 enum ice_status status; 6768 u16 wanted_events; 6769 6770 /* Set the bits for the events that we want to be notified by */ 6771 wanted_events = (ICE_AQ_LINK_EVENT_UPDOWN | 6772 ICE_AQ_LINK_EVENT_MEDIA_NA | 6773 ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL); 6774 6775 /* request that every event except the wanted events be masked */ 6776 status = ice_aq_set_event_mask(hw, hw->port_info->lport, ~wanted_events, NULL); 6777 if (status) { 6778 device_printf(sc->dev, 6779 "Failed to set link status event mask, err %s aq_err %s\n", 6780 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6781 return (EIO); 6782 } 6783 6784 /* Request link info with the LSE bit set to enable link status events */ 6785 status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL); 6786 if (status) { 6787 device_printf(sc->dev, 6788 "Failed to enable link status events, err %s aq_err %s\n", 6789 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6790 return (EIO); 6791 } 6792 6793 return (0); 6794 } 6795 6796 /** 6797 * ice_handle_mdd_event - Handle possibly malicious events 6798 * @sc: the device softc 6799 * 6800 * Called by the admin task if an MDD detection interrupt is triggered. 6801 * Identifies possibly malicious events coming from VFs. Also triggers for 6802 * similar incorrect behavior from the PF as well. 
6803 */ 6804 void 6805 ice_handle_mdd_event(struct ice_softc *sc) 6806 { 6807 struct ice_hw *hw = &sc->hw; 6808 bool mdd_detected = false, request_reinit = false; 6809 device_t dev = sc->dev; 6810 u32 reg; 6811 6812 if (!ice_testandclear_state(&sc->state, ICE_STATE_MDD_PENDING)) 6813 return; 6814 6815 reg = rd32(hw, GL_MDET_TX_TCLAN); 6816 if (reg & GL_MDET_TX_TCLAN_VALID_M) { 6817 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S; 6818 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> GL_MDET_TX_TCLAN_VF_NUM_S; 6819 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> GL_MDET_TX_TCLAN_MAL_TYPE_S; 6820 u16 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S; 6821 6822 device_printf(dev, "Malicious Driver Detection Tx Descriptor check event '%s' on Tx queue %u PF# %u VF# %u\n", 6823 ice_mdd_tx_tclan_str(event), queue, pf_num, vf_num); 6824 6825 /* Only clear this event if it matches this PF, that way other 6826 * PFs can read the event and determine VF and queue number. 6827 */ 6828 if (pf_num == hw->pf_id) 6829 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); 6830 6831 mdd_detected = true; 6832 } 6833 6834 /* Determine what triggered the MDD event */ 6835 reg = rd32(hw, GL_MDET_TX_PQM); 6836 if (reg & GL_MDET_TX_PQM_VALID_M) { 6837 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> GL_MDET_TX_PQM_PF_NUM_S; 6838 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> GL_MDET_TX_PQM_VF_NUM_S; 6839 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> GL_MDET_TX_PQM_MAL_TYPE_S; 6840 u16 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> GL_MDET_TX_PQM_QNUM_S; 6841 6842 device_printf(dev, "Malicious Driver Detection Tx Quanta check event '%s' on Tx queue %u PF# %u VF# %u\n", 6843 ice_mdd_tx_pqm_str(event), queue, pf_num, vf_num); 6844 6845 /* Only clear this event if it matches this PF, that way other 6846 * PFs can read the event and determine VF and queue number. 6847 */ 6848 if (pf_num == hw->pf_id) 6849 wr32(hw, GL_MDET_TX_PQM, 0xffffffff); 6850 6851 mdd_detected = true; 6852 } 6853 6854 reg = rd32(hw, GL_MDET_RX); 6855 if (reg & GL_MDET_RX_VALID_M) { 6856 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> GL_MDET_RX_PF_NUM_S; 6857 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> GL_MDET_RX_VF_NUM_S; 6858 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> GL_MDET_RX_MAL_TYPE_S; 6859 u16 queue = (reg & GL_MDET_RX_QNUM_M) >> GL_MDET_RX_QNUM_S; 6860 6861 device_printf(dev, "Malicious Driver Detection Rx event '%s' on Rx queue %u PF# %u VF# %u\n", 6862 ice_mdd_rx_str(event), queue, pf_num, vf_num); 6863 6864 /* Only clear this event if it matches this PF, that way other 6865 * PFs can read the event and determine VF and queue number. 6866 */ 6867 if (pf_num == hw->pf_id) 6868 wr32(hw, GL_MDET_RX, 0xffffffff); 6869 6870 mdd_detected = true; 6871 } 6872 6873 /* Now, confirm that this event actually affects this PF, by checking 6874 * the PF registers. 
6875 */ 6876 if (mdd_detected) { 6877 reg = rd32(hw, PF_MDET_TX_TCLAN); 6878 if (reg & PF_MDET_TX_TCLAN_VALID_M) { 6879 wr32(hw, PF_MDET_TX_TCLAN, 0xffff); 6880 sc->soft_stats.tx_mdd_count++; 6881 request_reinit = true; 6882 } 6883 6884 reg = rd32(hw, PF_MDET_TX_PQM); 6885 if (reg & PF_MDET_TX_PQM_VALID_M) { 6886 wr32(hw, PF_MDET_TX_PQM, 0xffff); 6887 sc->soft_stats.tx_mdd_count++; 6888 request_reinit = true; 6889 } 6890 6891 reg = rd32(hw, PF_MDET_RX); 6892 if (reg & PF_MDET_RX_VALID_M) { 6893 wr32(hw, PF_MDET_RX, 0xffff); 6894 sc->soft_stats.rx_mdd_count++; 6895 request_reinit = true; 6896 } 6897 } 6898 6899 /* TODO: Implement logic to detect and handle events caused by VFs. */ 6900 6901 /* request that the upper stack re-initialize the Tx/Rx queues */ 6902 if (request_reinit) 6903 ice_request_stack_reinit(sc); 6904 6905 ice_flush(hw); 6906 } 6907 6908 /** 6909 * ice_init_dcb_setup - Initialize DCB settings for HW 6910 * @sc: the device softc 6911 * 6912 * This needs to be called after the fw_lldp_agent sysctl is added, since that 6913 * can update the device's LLDP agent status if a tunable value is set. 6914 * 6915 * Get and store the initial state of DCB settings on driver load. Print out 6916 * informational messages as well. 6917 */ 6918 void 6919 ice_init_dcb_setup(struct ice_softc *sc) 6920 { 6921 struct ice_hw *hw = &sc->hw; 6922 device_t dev = sc->dev; 6923 bool dcbx_agent_status; 6924 enum ice_status status; 6925 6926 /* Don't do anything if DCB isn't supported */ 6927 if (!hw->func_caps.common_cap.dcb) { 6928 device_printf(dev, "%s: No DCB support\n", 6929 __func__); 6930 return; 6931 } 6932 6933 hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw); 6934 if (hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_DONE && 6935 hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) { 6936 /* 6937 * Start DCBX agent, but not LLDP. The return value isn't 6938 * checked here because a more detailed dcbx agent status is 6939 * retrieved and checked in ice_init_dcb() and below. 6940 */ 6941 ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL); 6942 } 6943 6944 /* This sets hw->port_info->qos_cfg.is_sw_lldp */ 6945 status = ice_init_dcb(hw, true); 6946 6947 /* If there is an error, then FW LLDP is not in a usable state */ 6948 if (status != 0 && status != ICE_ERR_NOT_READY) { 6949 /* Don't print an error message if the return code from the AQ 6950 * cmd performed in ice_init_dcb() is EPERM; that means the 6951 * FW LLDP engine is disabled, and that is a valid state.
		 */
		if (!(status == ICE_ERR_AQ_ERROR &&
		      hw->adminq.sq_last_status == ICE_AQ_RC_EPERM)) {
			device_printf(dev, "DCB init failed, err %s aq_err %s\n",
				      ice_status_str(status),
				      ice_aq_str(hw->adminq.sq_last_status));
		}
		hw->port_info->qos_cfg.dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
	}

	switch (hw->port_info->qos_cfg.dcbx_status) {
	case ICE_DCBX_STATUS_DIS:
		ice_debug(hw, ICE_DBG_DCB, "DCBX disabled\n");
		break;
	case ICE_DCBX_STATUS_NOT_STARTED:
		ice_debug(hw, ICE_DBG_DCB, "DCBX not started\n");
		break;
	case ICE_DCBX_STATUS_MULTIPLE_PEERS:
		ice_debug(hw, ICE_DBG_DCB, "DCBX detected multiple peers\n");
		break;
	default:
		break;
	}

	/* LLDP disabled in FW */
	if (hw->port_info->qos_cfg.is_sw_lldp) {
		ice_add_rx_lldp_filter(sc);
		device_printf(dev, "Firmware LLDP agent disabled\n");
	}
}

/**
 * ice_handle_mib_change_event - helper function to log LLDP MIB change events
 * @sc: device softc
 * @event: event received on a control queue
 *
 * Prints out the type of an LLDP MIB change event in a DCB debug message.
 *
 * XXX: Should be extended to do more if the driver decides to notify other SW
 * of LLDP MIB changes, or needs to extract info from the MIB.
 */
static void
ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event)
{
	struct ice_aqc_lldp_get_mib *params =
	    (struct ice_aqc_lldp_get_mib *)&event->desc.params.lldp_get_mib;
	u8 mib_type, bridge_type, tx_status;

	/* XXX: To get the contents of the MIB that caused the event, set the
	 * ICE_DBG_AQ debug mask and read that output
	 */
	static const char* mib_type_strings[] = {
		"Local MIB",
		"Remote MIB",
		"Reserved",
		"Reserved"
	};
	static const char* bridge_type_strings[] = {
		"Nearest Bridge",
		"Non-TPMR Bridge",
		"Reserved",
		"Reserved"
	};
	static const char* tx_status_strings[] = {
		"Port's TX active",
		"Port's TX suspended and drained",
		"Reserved",
		"Port's TX suspended and drained; blocked TC pipe flushed"
	};

	mib_type = (params->type & ICE_AQ_LLDP_MIB_TYPE_M) >>
	    ICE_AQ_LLDP_MIB_TYPE_S;
	bridge_type = (params->type & ICE_AQ_LLDP_BRID_TYPE_M) >>
	    ICE_AQ_LLDP_BRID_TYPE_S;
	tx_status = (params->type & ICE_AQ_LLDP_TX_M) >>
	    ICE_AQ_LLDP_TX_S;

	ice_debug(&sc->hw, ICE_DBG_DCB, "LLDP MIB Change Event (%s, %s, %s)\n",
	    mib_type_strings[mib_type], bridge_type_strings[bridge_type],
	    tx_status_strings[tx_status]);
}

/**
 * ice_send_version - Send driver version to firmware
 * @sc: the device private softc
 *
 * Send the driver version to the firmware. This must be called as early as
 * possible after ice_init_hw().
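 * For example, a driver built as version 1.2.3 reports major_ver 1,
 * minor_ver 2, and build_ver 3, along with the full ice_driver_version
 * string.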
7040 */ 7041 int 7042 ice_send_version(struct ice_softc *sc) 7043 { 7044 struct ice_driver_ver driver_version = {0}; 7045 struct ice_hw *hw = &sc->hw; 7046 device_t dev = sc->dev; 7047 enum ice_status status; 7048 7049 driver_version.major_ver = ice_major_version; 7050 driver_version.minor_ver = ice_minor_version; 7051 driver_version.build_ver = ice_patch_version; 7052 driver_version.subbuild_ver = ice_rc_version; 7053 7054 strlcpy((char *)driver_version.driver_string, ice_driver_version, 7055 sizeof(driver_version.driver_string)); 7056 7057 status = ice_aq_send_driver_ver(hw, &driver_version, NULL); 7058 if (status) { 7059 device_printf(dev, "Unable to send driver version to firmware, err %s aq_err %s\n", 7060 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 7061 return (EIO); 7062 } 7063 7064 return (0); 7065 } 7066 7067 /** 7068 * ice_handle_lan_overflow_event - helper function to log LAN overflow events 7069 * @sc: device softc 7070 * @event: event received on a control queue 7071 * 7072 * Prints out a message when a LAN overflow event is detected on a receive 7073 * queue. 7074 */ 7075 static void 7076 ice_handle_lan_overflow_event(struct ice_softc *sc, struct ice_rq_event_info *event) 7077 { 7078 struct ice_aqc_event_lan_overflow *params = 7079 (struct ice_aqc_event_lan_overflow *)&event->desc.params.lan_overflow; 7080 struct ice_hw *hw = &sc->hw; 7081 7082 ice_debug(hw, ICE_DBG_DCB, "LAN overflow event detected, prtdcb_ruptq=0x%08x, qtx_ctl=0x%08x\n", 7083 LE32_TO_CPU(params->prtdcb_ruptq), 7084 LE32_TO_CPU(params->qtx_ctl)); 7085 } 7086 7087 /** 7088 * ice_add_ethertype_to_list - Add an Ethertype filter to a filter list 7089 * @vsi: the VSI to target packets to 7090 * @list: the list to add the filter to 7091 * @ethertype: the Ethertype to filter on 7092 * @direction: The direction of the filter (Tx or Rx) 7093 * @action: the action to take 7094 * 7095 * Add an Ethertype filter to a filter list. Used to forward a series of 7096 * filters to the firmware for configuring the switch. 7097 * 7098 * Returns 0 on success, and an error code on failure. 7099 */ 7100 static int 7101 ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 7102 u16 ethertype, u16 direction, 7103 enum ice_sw_fwd_act_type action) 7104 { 7105 struct ice_fltr_list_entry *entry; 7106 7107 MPASS((direction == ICE_FLTR_TX) || (direction == ICE_FLTR_RX)); 7108 7109 entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); 7110 if (!entry) 7111 return (ENOMEM); 7112 7113 entry->fltr_info.flag = direction; 7114 entry->fltr_info.src_id = ICE_SRC_ID_VSI; 7115 entry->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 7116 entry->fltr_info.fltr_act = action; 7117 entry->fltr_info.vsi_handle = vsi->idx; 7118 entry->fltr_info.l_data.ethertype_mac.ethertype = ethertype; 7119 7120 LIST_ADD(&entry->list_entry, list); 7121 7122 return 0; 7123 } 7124 7125 #define ETHERTYPE_PAUSE_FRAMES 0x8808 7126 #define ETHERTYPE_LLDP_FRAMES 0x88cc 7127 7128 /** 7129 * ice_cfg_pf_ethertype_filters - Configure switch to drop ethertypes 7130 * @sc: the device private softc 7131 * 7132 * Configure the switch to drop PAUSE frames and LLDP frames transmitted from 7133 * the host. This prevents malicious VFs from sending these frames and being 7134 * able to control or configure the network. 
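 * The Ethertypes involved (0x8808 for PAUSE frames, 0x88cc for LLDP) are
 * defined just above; each drop filter is only programmed when the
 * corresponding enable_tx_fc_filter or enable_tx_lldp_filter flag is set.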
7135 */ 7136 int 7137 ice_cfg_pf_ethertype_filters(struct ice_softc *sc) 7138 { 7139 struct ice_list_head ethertype_list; 7140 struct ice_vsi *vsi = &sc->pf_vsi; 7141 struct ice_hw *hw = &sc->hw; 7142 device_t dev = sc->dev; 7143 enum ice_status status; 7144 int err = 0; 7145 7146 INIT_LIST_HEAD(ðertype_list); 7147 7148 /* 7149 * Note that the switch filters will ignore the VSI index for the drop 7150 * action, so we only need to program drop filters once for the main 7151 * VSI. 7152 */ 7153 7154 /* Configure switch to drop all Tx pause frames coming from any VSI. */ 7155 if (sc->enable_tx_fc_filter) { 7156 err = ice_add_ethertype_to_list(vsi, ðertype_list, 7157 ETHERTYPE_PAUSE_FRAMES, 7158 ICE_FLTR_TX, ICE_DROP_PACKET); 7159 if (err) 7160 goto free_ethertype_list; 7161 } 7162 7163 /* Configure switch to drop LLDP frames coming from any VSI */ 7164 if (sc->enable_tx_lldp_filter) { 7165 err = ice_add_ethertype_to_list(vsi, ðertype_list, 7166 ETHERTYPE_LLDP_FRAMES, 7167 ICE_FLTR_TX, ICE_DROP_PACKET); 7168 if (err) 7169 goto free_ethertype_list; 7170 } 7171 7172 status = ice_add_eth_mac(hw, ðertype_list); 7173 if (status) { 7174 device_printf(dev, 7175 "Failed to add Tx Ethertype filters, err %s aq_err %s\n", 7176 ice_status_str(status), 7177 ice_aq_str(hw->adminq.sq_last_status)); 7178 err = (EIO); 7179 } 7180 7181 free_ethertype_list: 7182 ice_free_fltr_list(ðertype_list); 7183 return err; 7184 } 7185 7186 /** 7187 * ice_add_rx_lldp_filter - add ethertype filter for Rx LLDP frames 7188 * @sc: the device private structure 7189 * 7190 * Add a switch ethertype filter which forwards the LLDP frames to the main PF 7191 * VSI. Called when the fw_lldp_agent is disabled, to allow the LLDP frames to 7192 * be forwarded to the stack. 7193 */ 7194 static void 7195 ice_add_rx_lldp_filter(struct ice_softc *sc) 7196 { 7197 struct ice_list_head ethertype_list; 7198 struct ice_vsi *vsi = &sc->pf_vsi; 7199 struct ice_hw *hw = &sc->hw; 7200 device_t dev = sc->dev; 7201 enum ice_status status; 7202 int err; 7203 u16 vsi_num; 7204 7205 /* 7206 * If FW is new enough, use a direct AQ command to perform the filter 7207 * addition. 7208 */ 7209 if (ice_fw_supports_lldp_fltr_ctrl(hw)) { 7210 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); 7211 status = ice_lldp_fltr_add_remove(hw, vsi_num, true); 7212 if (status) { 7213 device_printf(dev, 7214 "Failed to add Rx LLDP filter, err %s aq_err %s\n", 7215 ice_status_str(status), 7216 ice_aq_str(hw->adminq.sq_last_status)); 7217 } else 7218 ice_set_state(&sc->state, 7219 ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER); 7220 return; 7221 } 7222 7223 INIT_LIST_HEAD(ðertype_list); 7224 7225 /* Forward Rx LLDP frames to the stack */ 7226 err = ice_add_ethertype_to_list(vsi, ðertype_list, 7227 ETHERTYPE_LLDP_FRAMES, 7228 ICE_FLTR_RX, ICE_FWD_TO_VSI); 7229 if (err) { 7230 device_printf(dev, 7231 "Failed to add Rx LLDP filter, err %s\n", 7232 ice_err_str(err)); 7233 goto free_ethertype_list; 7234 } 7235 7236 status = ice_add_eth_mac(hw, ðertype_list); 7237 if (status && status != ICE_ERR_ALREADY_EXISTS) { 7238 device_printf(dev, 7239 "Failed to add Rx LLDP filter, err %s aq_err %s\n", 7240 ice_status_str(status), 7241 ice_aq_str(hw->adminq.sq_last_status)); 7242 } else { 7243 /* 7244 * If status == ICE_ERR_ALREADY_EXISTS, we won't treat an 7245 * already existing filter as an error case. 
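		 * Either way, record that the driver owns the Rx LLDP filter
		 * so that ice_del_rx_lldp_filter() knows it is allowed to
		 * remove it later.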
7246 */ 7247 ice_set_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER); 7248 } 7249 7250 free_ethertype_list: 7251 ice_free_fltr_list(ðertype_list); 7252 } 7253 7254 /** 7255 * ice_del_rx_lldp_filter - Remove ethertype filter for Rx LLDP frames 7256 * @sc: the device private structure 7257 * 7258 * Remove the switch filter forwarding LLDP frames to the main PF VSI, called 7259 * when the firmware LLDP agent is enabled, to stop routing LLDP frames to the 7260 * stack. 7261 */ 7262 static void 7263 ice_del_rx_lldp_filter(struct ice_softc *sc) 7264 { 7265 struct ice_list_head ethertype_list; 7266 struct ice_vsi *vsi = &sc->pf_vsi; 7267 struct ice_hw *hw = &sc->hw; 7268 device_t dev = sc->dev; 7269 enum ice_status status; 7270 int err; 7271 u16 vsi_num; 7272 7273 /* 7274 * Only in the scenario where the driver added the filter during 7275 * this session (while the driver was loaded) would we be able to 7276 * delete this filter. 7277 */ 7278 if (!ice_test_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER)) 7279 return; 7280 7281 /* 7282 * If FW is new enough, use a direct AQ command to perform the filter 7283 * removal. 7284 */ 7285 if (ice_fw_supports_lldp_fltr_ctrl(hw)) { 7286 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); 7287 status = ice_lldp_fltr_add_remove(hw, vsi_num, false); 7288 if (status) { 7289 device_printf(dev, 7290 "Failed to remove Rx LLDP filter, err %s aq_err %s\n", 7291 ice_status_str(status), 7292 ice_aq_str(hw->adminq.sq_last_status)); 7293 } 7294 return; 7295 } 7296 7297 INIT_LIST_HEAD(ðertype_list); 7298 7299 /* Remove filter forwarding Rx LLDP frames to the stack */ 7300 err = ice_add_ethertype_to_list(vsi, ðertype_list, 7301 ETHERTYPE_LLDP_FRAMES, 7302 ICE_FLTR_RX, ICE_FWD_TO_VSI); 7303 if (err) { 7304 device_printf(dev, 7305 "Failed to remove Rx LLDP filter, err %s\n", 7306 ice_err_str(err)); 7307 goto free_ethertype_list; 7308 } 7309 7310 status = ice_remove_eth_mac(hw, ðertype_list); 7311 if (status == ICE_ERR_DOES_NOT_EXIST) { 7312 ; /* Don't complain if we try to remove a filter that doesn't exist */ 7313 } else if (status) { 7314 device_printf(dev, 7315 "Failed to remove Rx LLDP filter, err %s aq_err %s\n", 7316 ice_status_str(status), 7317 ice_aq_str(hw->adminq.sq_last_status)); 7318 } 7319 7320 free_ethertype_list: 7321 ice_free_fltr_list(ðertype_list); 7322 } 7323 7324 /** 7325 * ice_init_link_configuration -- Setup link in different ways depending 7326 * on whether media is available or not. 7327 * @sc: device private structure 7328 * 7329 * Called at the end of the attach process to either set default link 7330 * parameters if there is media available, or force HW link down and 7331 * set a state bit if there is no media. 7332 */ 7333 void 7334 ice_init_link_configuration(struct ice_softc *sc) 7335 { 7336 struct ice_port_info *pi = sc->hw.port_info; 7337 struct ice_hw *hw = &sc->hw; 7338 device_t dev = sc->dev; 7339 enum ice_status status; 7340 7341 pi->phy.get_link_info = true; 7342 status = ice_get_link_status(pi, &sc->link_up); 7343 if (status != ICE_SUCCESS) { 7344 device_printf(dev, 7345 "%s: ice_get_link_status failed; status %s, aq_err %s\n", 7346 __func__, ice_status_str(status), 7347 ice_aq_str(hw->adminq.sq_last_status)); 7348 return; 7349 } 7350 7351 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 7352 ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA); 7353 /* Apply default link settings */ 7354 ice_apply_saved_phy_cfg(sc); 7355 } else { 7356 /* Set link down, and poll for media available in timer. 
This prevents the 7357 * driver from receiving spurious link-related events. 7358 */ 7359 ice_set_state(&sc->state, ICE_STATE_NO_MEDIA); 7360 status = ice_aq_set_link_restart_an(pi, false, NULL); 7361 if (status != ICE_SUCCESS) 7362 device_printf(dev, 7363 "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", 7364 __func__, ice_status_str(status), 7365 ice_aq_str(hw->adminq.sq_last_status)); 7366 } 7367 } 7368 7369 /** 7370 * ice_apply_saved_phy_req_to_cfg -- Write saved user PHY settings to cfg data 7371 * @pi: port info struct 7372 * @pcaps: TOPO_CAPS capability data to use for defaults 7373 * @cfg: new PHY config data to be modified 7374 * 7375 * Applies user settings for advertised speeds to the PHY type fields in the 7376 * supplied PHY config struct. It uses the data from pcaps to check if the 7377 * saved settings are invalid and uses the pcaps data instead if they are 7378 * invalid. 7379 */ 7380 static void 7381 ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi, 7382 struct ice_aqc_get_phy_caps_data *pcaps, 7383 struct ice_aqc_set_phy_cfg_data *cfg) 7384 { 7385 u64 phy_low = 0, phy_high = 0; 7386 7387 ice_update_phy_type(&phy_low, &phy_high, pi->phy.curr_user_speed_req); 7388 cfg->phy_type_low = pcaps->phy_type_low & htole64(phy_low); 7389 cfg->phy_type_high = pcaps->phy_type_high & htole64(phy_high); 7390 7391 /* Can't use saved user speed request; use NVM default PHY capabilities */ 7392 if (!cfg->phy_type_low && !cfg->phy_type_high) { 7393 cfg->phy_type_low = pcaps->phy_type_low; 7394 cfg->phy_type_high = pcaps->phy_type_high; 7395 } 7396 } 7397 7398 /** 7399 * ice_apply_saved_fec_req_to_cfg -- Write saved user FEC mode to cfg data 7400 * @pi: port info struct 7401 * @pcaps: TOPO_CAPS capability data to use for defaults 7402 * @cfg: new PHY config data to be modified 7403 * 7404 * Applies user setting for FEC mode to PHY config struct. It uses the data 7405 * from pcaps to check if the saved settings are invalid and uses the pcaps 7406 * data instead if they are invalid. 7407 */ 7408 static void 7409 ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi, 7410 struct ice_aqc_get_phy_caps_data *pcaps, 7411 struct ice_aqc_set_phy_cfg_data *cfg) 7412 { 7413 ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); 7414 7415 /* Can't use saved user FEC mode; use NVM default PHY capabilities */ 7416 if (cfg->link_fec_opt && 7417 !(cfg->link_fec_opt & pcaps->link_fec_options)) { 7418 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 7419 cfg->link_fec_opt = pcaps->link_fec_options; 7420 } 7421 } 7422 7423 /** 7424 * ice_apply_saved_fc_req_to_cfg -- Write saved user flow control mode to cfg data 7425 * @pi: port info struct 7426 * @cfg: new PHY config data to be modified 7427 * 7428 * Applies user setting for flow control mode to PHY config struct. There are 7429 * no invalid flow control mode settings; if there are, then this function 7430 * treats them like "ICE_FC_NONE". 
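 * For example, a saved request of ICE_FC_RX_PAUSE sets only the Rx pause
 * ability bit in the new configuration, while ICE_FC_FULL sets both the Tx
 * and Rx pause ability bits.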
7431 */ 7432 static void 7433 ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, 7434 struct ice_aqc_set_phy_cfg_data *cfg) 7435 { 7436 cfg->caps &= ~(ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | 7437 ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY); 7438 7439 switch (pi->phy.curr_user_fc_req) { 7440 case ICE_FC_FULL: 7441 cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | 7442 ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; 7443 break; 7444 case ICE_FC_RX_PAUSE: 7445 cfg->caps |= ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; 7446 break; 7447 case ICE_FC_TX_PAUSE: 7448 cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY; 7449 break; 7450 default: 7451 /* ICE_FC_NONE */ 7452 break; 7453 } 7454 } 7455 7456 /** 7457 * ice_apply_saved_user_req_to_cfg -- Apply all saved user settings to AQ cfg data 7458 * @pi: port info struct 7459 * @pcaps: TOPO_CAPS capability data to use for defaults 7460 * @cfg: new PHY config data to be modified 7461 * 7462 * Applies user settings for advertised speeds, FEC mode, and flow control 7463 * mode to the supplied PHY config struct; it uses the data from pcaps to check 7464 * if the saved settings are invalid and uses the pcaps data instead if they 7465 * are invalid. 7466 */ 7467 static void 7468 ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi, 7469 struct ice_aqc_get_phy_caps_data *pcaps, 7470 struct ice_aqc_set_phy_cfg_data *cfg) 7471 { 7472 ice_apply_saved_phy_req_to_cfg(pi, pcaps, cfg); 7473 ice_apply_saved_fec_req_to_cfg(pi, pcaps, cfg); 7474 ice_apply_saved_fc_req_to_cfg(pi, cfg); 7475 } 7476 7477 /** 7478 * ice_apply_saved_phy_cfg -- Re-apply user PHY config settings 7479 * @sc: device private structure 7480 * 7481 * Takes the saved user PHY config settings, overwrites the NVM 7482 * default with them if they're valid, and uses the Set PHY Config AQ command 7483 * to apply them. 7484 * 7485 * Intended for use when media is inserted. 
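 * For example, ice_init_link_configuration() calls this at the end of the
 * attach process when it finds media available, to apply the saved (or
 * default) speed, FEC, and flow control settings.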
7486 * 7487 * @pre Port has media available 7488 */ 7489 void 7490 ice_apply_saved_phy_cfg(struct ice_softc *sc) 7491 { 7492 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 7493 struct ice_port_info *pi = sc->hw.port_info; 7494 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 7495 struct ice_hw *hw = &sc->hw; 7496 device_t dev = sc->dev; 7497 enum ice_status status; 7498 7499 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 7500 &pcaps, NULL); 7501 if (status != ICE_SUCCESS) { 7502 device_printf(dev, 7503 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 7504 __func__, ice_status_str(status), 7505 ice_aq_str(hw->adminq.sq_last_status)); 7506 return; 7507 } 7508 7509 /* Setup new PHY config */ 7510 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 7511 7512 /* Apply settings requested by user */ 7513 ice_apply_saved_user_req_to_cfg(pi, &pcaps, &cfg); 7514 7515 /* Enable link and re-negotiate it */ 7516 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 7517 7518 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 7519 if (status != ICE_SUCCESS) { 7520 if ((status == ICE_ERR_AQ_ERROR) && 7521 (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)) 7522 device_printf(dev, 7523 "%s: User PHY cfg not applied; no media in port\n", 7524 __func__); 7525 else 7526 device_printf(dev, 7527 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 7528 __func__, ice_status_str(status), 7529 ice_aq_str(hw->adminq.sq_last_status)); 7530 } 7531 } 7532 7533 /** 7534 * ice_print_ldo_tlv - Print out LDO TLV information 7535 * @sc: device private structure 7536 * @tlv: LDO TLV information from the adapter NVM 7537 * 7538 * Dump out the information in tlv to the kernel message buffer; intended for 7539 * debugging purposes. 7540 */ 7541 static void 7542 ice_print_ldo_tlv(struct ice_softc *sc, struct ice_link_default_override_tlv *tlv) 7543 { 7544 device_t dev = sc->dev; 7545 7546 device_printf(dev, "TLV: -options 0x%02x\n", tlv->options); 7547 device_printf(dev, " -phy_config 0x%02x\n", tlv->phy_config); 7548 device_printf(dev, " -fec_options 0x%02x\n", tlv->fec_options); 7549 device_printf(dev, " -phy_high 0x%016llx\n", 7550 (unsigned long long)tlv->phy_type_high); 7551 device_printf(dev, " -phy_low 0x%016llx\n", 7552 (unsigned long long)tlv->phy_type_low); 7553 } 7554 7555 /** 7556 * ice_set_link_management_mode -- Strict or lenient link management 7557 * @sc: device private structure 7558 * 7559 * Some NVMs give the adapter the option to advertise a superset of link 7560 * configurations. This checks to see if that option is enabled. 7561 * Further, the NVM could also provide a specific set of configurations 7562 * to try; these are cached in the driver's private structure if they 7563 * are available. 7564 */ 7565 void 7566 ice_set_link_management_mode(struct ice_softc *sc) 7567 { 7568 struct ice_port_info *pi = sc->hw.port_info; 7569 device_t dev = sc->dev; 7570 struct ice_link_default_override_tlv tlv = { 0 }; 7571 enum ice_status status; 7572 7573 /* Port must be in strict mode if FW version is below a certain 7574 * version. (i.e. 
Don't set lenient mode features) 7575 */ 7576 if (!(ice_fw_supports_link_override(&sc->hw))) 7577 return; 7578 7579 status = ice_get_link_default_override(&tlv, pi); 7580 if (status != ICE_SUCCESS) { 7581 device_printf(dev, 7582 "%s: ice_get_link_default_override failed; status %s, aq_err %s\n", 7583 __func__, ice_status_str(status), 7584 ice_aq_str(sc->hw.adminq.sq_last_status)); 7585 return; 7586 } 7587 7588 if (sc->hw.debug_mask & ICE_DBG_LINK) 7589 ice_print_ldo_tlv(sc, &tlv); 7590 7591 /* Set lenient link mode */ 7592 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LENIENT_LINK_MODE) && 7593 (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE))) 7594 ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_en); 7595 7596 /* Default overrides only work if in lenient link mode */ 7597 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DEFAULT_OVERRIDE) && 7598 ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE) && 7599 (tlv.options & ICE_LINK_OVERRIDE_EN)) 7600 ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_en); 7601 7602 /* Cache the LDO TLV structure in the driver, since it won't change 7603 * during the driver's lifetime. 7604 */ 7605 sc->ldo_tlv = tlv; 7606 } 7607 7608 /** 7609 * ice_init_saved_phy_cfg -- Set cached user PHY cfg settings with NVM defaults 7610 * @sc: device private structure 7611 * 7612 * This should be called before the tunables for these link settings 7613 * (e.g. advertise_speed) are added -- so that these defaults don't overwrite 7614 * the cached values that the sysctl handlers will write. 7615 * 7616 * This also needs to be called before ice_init_link_configuration, to ensure 7617 * that there are sane values that can be written if there is media available 7618 * in the port. 7619 */ 7620 void 7621 ice_init_saved_phy_cfg(struct ice_softc *sc) 7622 { 7623 struct ice_port_info *pi = sc->hw.port_info; 7624 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 7625 struct ice_hw *hw = &sc->hw; 7626 device_t dev = sc->dev; 7627 enum ice_status status; 7628 u64 phy_low, phy_high; 7629 7630 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 7631 &pcaps, NULL); 7632 if (status != ICE_SUCCESS) { 7633 device_printf(dev, 7634 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 7635 __func__, ice_status_str(status), 7636 ice_aq_str(hw->adminq.sq_last_status)); 7637 return; 7638 } 7639 7640 phy_low = le64toh(pcaps.phy_type_low); 7641 phy_high = le64toh(pcaps.phy_type_high); 7642 7643 /* Save off initial config parameters */ 7644 pi->phy.curr_user_speed_req = 7645 ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); 7646 pi->phy.curr_user_fec_req = ice_caps_to_fec_mode(pcaps.caps, 7647 pcaps.link_fec_options); 7648 pi->phy.curr_user_fc_req = ice_caps_to_fc_mode(pcaps.caps); 7649 } 7650 7651 /** 7652 * ice_module_init - Driver callback to handle module load 7653 * 7654 * Callback for handling module load events. This function should initialize 7655 * any data structures that are used for the life of the device driver. 7656 */ 7657 static int 7658 ice_module_init(void) 7659 { 7660 return (0); 7661 } 7662 7663 /** 7664 * ice_module_exit - Driver callback to handle module exit 7665 * 7666 * Callback for handling module unload events. This function should release 7667 * any resources initialized during ice_module_init. 7668 * 7669 * If this function returns non-zero, the module will not be unloaded. It 7670 * should only return such a value if the module cannot be unloaded at all, 7671 * such as due to outstanding memory references that cannot be revoked. 
7672 */ 7673 static int 7674 ice_module_exit(void) 7675 { 7676 return (0); 7677 } 7678 7679 /** 7680 * ice_module_event_handler - Callback for module events 7681 * @mod: unused module_t parameter 7682 * @what: the event requested 7683 * @arg: unused event argument 7684 * 7685 * Callback used to handle module events from the stack. Used to allow the 7686 * driver to define custom behavior that should happen at module load and 7687 * unload. 7688 */ 7689 int 7690 ice_module_event_handler(module_t __unused mod, int what, void __unused *arg) 7691 { 7692 switch (what) { 7693 case MOD_LOAD: 7694 return ice_module_init(); 7695 case MOD_UNLOAD: 7696 return ice_module_exit(); 7697 default: 7698 /* TODO: do we need to handle MOD_QUIESCE and MOD_SHUTDOWN? */ 7699 return (EOPNOTSUPP); 7700 } 7701 } 7702 7703 /** 7704 * ice_handle_nvm_access_ioctl - Handle an NVM access ioctl request 7705 * @sc: the device private softc 7706 * @ifd: ifdrv ioctl request pointer 7707 */ 7708 int 7709 ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd) 7710 { 7711 union ice_nvm_access_data *data; 7712 struct ice_nvm_access_cmd *cmd; 7713 size_t ifd_len = ifd->ifd_len, malloc_len; 7714 struct ice_hw *hw = &sc->hw; 7715 device_t dev = sc->dev; 7716 enum ice_status status; 7717 u8 *nvm_buffer; 7718 int err; 7719 7720 /* 7721 * ifioctl forwards SIOCxDRVSPEC to iflib without performing 7722 * a privilege check. In turn, iflib forwards the ioctl to the driver 7723 * without performing a privilege check. Perform one here to ensure 7724 * that non-privileged threads cannot access this interface. 7725 */ 7726 err = priv_check(curthread, PRIV_DRIVER); 7727 if (err) 7728 return (err); 7729 7730 if (ifd_len < sizeof(struct ice_nvm_access_cmd)) { 7731 device_printf(dev, "%s: ifdrv length is too small. Got %zu, but expected %zu\n", 7732 __func__, ifd_len, sizeof(struct ice_nvm_access_cmd)); 7733 return (EINVAL); 7734 } 7735 7736 if (ifd->ifd_data == NULL) { 7737 device_printf(dev, "%s: ifd data buffer not present.\n", 7738 __func__); 7739 return (EINVAL); 7740 } 7741 7742 /* 7743 * If everything works correctly, ice_handle_nvm_access should not 7744 * modify data past the size of the ioctl length. However, it could 7745 * lead to memory corruption if it did. Make sure to allocate at least 7746 * enough space for the command and data regardless. This 7747 * ensures that any access to the data union will not access invalid 7748 * memory. 7749 */ 7750 malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd)); 7751 7752 nvm_buffer = (u8 *)malloc(malloc_len, M_ICE, M_ZERO | M_WAITOK); 7753 if (!nvm_buffer) 7754 return (ENOMEM); 7755 7756 /* Copy the NVM access command and data in from user space */ 7757 /* coverity[tainted_data_argument] */ 7758 err = copyin(ifd->ifd_data, nvm_buffer, ifd_len); 7759 if (err) { 7760 device_printf(dev, "%s: Copying request from user space failed, err %s\n", 7761 __func__, ice_err_str(err)); 7762 goto cleanup_free_nvm_buffer; 7763 } 7764 7765 /* 7766 * The NVM command structure is immediately followed by data which 7767 * varies in size based on the command. 
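	 * The user buffer is therefore laid out roughly as
	 *
	 *   [ struct ice_nvm_access_cmd ][ union ice_nvm_access_data ... ]
	 *
	 * which is why cmd and data below are simply offsets into the same
	 * allocation.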
7768 */ 7769 cmd = (struct ice_nvm_access_cmd *)nvm_buffer; 7770 data = (union ice_nvm_access_data *)(nvm_buffer + sizeof(struct ice_nvm_access_cmd)); 7771 7772 /* Handle the NVM access request */ 7773 status = ice_handle_nvm_access(hw, cmd, data); 7774 if (status) 7775 ice_debug(hw, ICE_DBG_NVM, 7776 "NVM access request failed, err %s\n", 7777 ice_status_str(status)); 7778 7779 /* Copy the possibly modified contents of the handled request out */ 7780 err = copyout(nvm_buffer, ifd->ifd_data, ifd_len); 7781 if (err) { 7782 device_printf(dev, "%s: Copying response back to user space failed, err %s\n", 7783 __func__, ice_err_str(err)); 7784 goto cleanup_free_nvm_buffer; 7785 } 7786 7787 /* Convert private status to an error code for proper ioctl response */ 7788 switch (status) { 7789 case ICE_SUCCESS: 7790 err = (0); 7791 break; 7792 case ICE_ERR_NO_MEMORY: 7793 err = (ENOMEM); 7794 break; 7795 case ICE_ERR_OUT_OF_RANGE: 7796 err = (ENOTTY); 7797 break; 7798 case ICE_ERR_PARAM: 7799 default: 7800 err = (EINVAL); 7801 break; 7802 } 7803 7804 cleanup_free_nvm_buffer: 7805 free(nvm_buffer, M_ICE); 7806 return err; 7807 } 7808 7809 /** 7810 * ice_read_sff_eeprom - Read data from SFF eeprom 7811 * @sc: device softc 7812 * @dev_addr: I2C device address (typically 0xA0 or 0xA2) 7813 * @offset: offset into the eeprom 7814 * @data: pointer to data buffer to store read data in 7815 * @length: length to read; max length is 16 7816 * 7817 * Read from the SFF eeprom in the module for this PF's port. For more details 7818 * on the contents of an SFF eeprom, refer to SFF-8724 (SFP), SFF-8636 (QSFP), 7819 * and SFF-8024 (both). 7820 */ 7821 int 7822 ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length) 7823 { 7824 struct ice_hw *hw = &sc->hw; 7825 int error = 0, retries = 0; 7826 enum ice_status status; 7827 7828 if (length > 16) 7829 return (EINVAL); 7830 7831 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) 7832 return (ENOSYS); 7833 7834 if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) 7835 return (ENXIO); 7836 7837 do { 7838 status = ice_aq_sff_eeprom(hw, 0, dev_addr, 7839 offset, 0, 0, data, length, 7840 false, NULL); 7841 if (!status) { 7842 error = 0; 7843 break; 7844 } 7845 if (status == ICE_ERR_AQ_ERROR && 7846 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 7847 error = EBUSY; 7848 continue; 7849 } 7850 if (status == ICE_ERR_AQ_ERROR && 7851 hw->adminq.sq_last_status == ICE_AQ_RC_EACCES) { 7852 /* FW says I2C access isn't supported */ 7853 error = EACCES; 7854 break; 7855 } 7856 if (status == ICE_ERR_AQ_ERROR && 7857 hw->adminq.sq_last_status == ICE_AQ_RC_EPERM) { 7858 device_printf(sc->dev, 7859 "%s: Module pointer location specified in command does not permit the required operation.\n", 7860 __func__); 7861 error = EPERM; 7862 break; 7863 } else { 7864 device_printf(sc->dev, 7865 "%s: Error reading I2C data: err %s aq_err %s\n", 7866 __func__, ice_status_str(status), 7867 ice_aq_str(hw->adminq.sq_last_status)); 7868 error = EIO; 7869 break; 7870 } 7871 } while (retries++ < ICE_I2C_MAX_RETRIES); 7872 7873 if (error == EBUSY) 7874 device_printf(sc->dev, 7875 "%s: Error reading I2C data after %d retries\n", 7876 __func__, ICE_I2C_MAX_RETRIES); 7877 7878 return (error); 7879 } 7880 7881 /** 7882 * ice_handle_i2c_req - Driver independent I2C request handler 7883 * @sc: device softc 7884 * @req: The I2C parameters to use 7885 * 7886 * Read from the port's I2C eeprom using the parameters from the ioctl. 
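 * For example, reading one byte from dev_addr 0xA0 at offset 0 returns the
 * module identifier byte, which ice_sysctl_read_i2c_diag_data() below uses
 * to distinguish SFP from QSFP modules.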
7887 */ 7888 int 7889 ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req) 7890 { 7891 return ice_read_sff_eeprom(sc, req->dev_addr, req->offset, req->data, req->len); 7892 } 7893 7894 /** 7895 * ice_sysctl_read_i2c_diag_data - Read some module diagnostic data via i2c 7896 * @oidp: sysctl oid structure 7897 * @arg1: pointer to private data structure 7898 * @arg2: unused 7899 * @req: sysctl request pointer 7900 * 7901 * Read 8 bytes of diagnostic data from the SFF eeprom in the (Q)SFP module 7902 * inserted into the port. 7903 * 7904 * | SFP A2 | QSFP Lower Page 7905 * ------------|---------|---------------- 7906 * Temperature | 96-97 | 22-23 7907 * Vcc | 98-99 | 26-27 7908 * TX power | 102-103 | 34-35..40-41 7909 * RX power | 104-105 | 50-51..56-57 7910 */ 7911 static int 7912 ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 7913 { 7914 struct ice_softc *sc = (struct ice_softc *)arg1; 7915 device_t dev = sc->dev; 7916 struct sbuf *sbuf; 7917 int error = 0; 7918 u8 data[16]; 7919 7920 UNREFERENCED_PARAMETER(arg2); 7921 UNREFERENCED_PARAMETER(oidp); 7922 7923 if (ice_driver_is_detaching(sc)) 7924 return (ESHUTDOWN); 7925 7926 if (req->oldptr == NULL) { 7927 error = SYSCTL_OUT(req, 0, 128); 7928 return (error); 7929 } 7930 7931 error = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1); 7932 if (error) 7933 return (error); 7934 7935 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 7936 if (data[0] == 0x3) { 7937 /* 7938 * Check for: 7939 * - Internally calibrated data 7940 * - Diagnostic monitoring is implemented 7941 */ 7942 ice_read_sff_eeprom(sc, 0xA0, 92, data, 1); 7943 if (!(data[0] & 0x60)) { 7944 device_printf(dev, "Module doesn't support diagnostics: 0xA0[92] = %02X\n", data[0]); 7945 return (ENODEV); 7946 } 7947 7948 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7949 7950 ice_read_sff_eeprom(sc, 0xA2, 96, data, 4); 7951 for (int i = 0; i < 4; i++) 7952 sbuf_printf(sbuf, "%02X ", data[i]); 7953 7954 ice_read_sff_eeprom(sc, 0xA2, 102, data, 4); 7955 for (int i = 0; i < 4; i++) 7956 sbuf_printf(sbuf, "%02X ", data[i]); 7957 } else if (data[0] == 0xD || data[0] == 0x11) { 7958 /* 7959 * QSFP+ modules are always internally calibrated, and must indicate 7960 * what types of diagnostic monitoring are implemented 7961 */ 7962 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7963 7964 ice_read_sff_eeprom(sc, 0xA0, 22, data, 2); 7965 for (int i = 0; i < 2; i++) 7966 sbuf_printf(sbuf, "%02X ", data[i]); 7967 7968 ice_read_sff_eeprom(sc, 0xA0, 26, data, 2); 7969 for (int i = 0; i < 2; i++) 7970 sbuf_printf(sbuf, "%02X ", data[i]); 7971 7972 ice_read_sff_eeprom(sc, 0xA0, 34, data, 2); 7973 for (int i = 0; i < 2; i++) 7974 sbuf_printf(sbuf, "%02X ", data[i]); 7975 7976 ice_read_sff_eeprom(sc, 0xA0, 50, data, 2); 7977 for (int i = 0; i < 2; i++) 7978 sbuf_printf(sbuf, "%02X ", data[i]); 7979 } else { 7980 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", data[0]); 7981 return (ENODEV); 7982 } 7983 7984 sbuf_finish(sbuf); 7985 sbuf_delete(sbuf); 7986 7987 return (0); 7988 } 7989 7990 /** 7991 * ice_alloc_intr_tracking - Setup interrupt tracking structures 7992 * @sc: device softc structure 7993 * 7994 * Sets up the resource manager for keeping track of interrupt allocations, 7995 * and initializes the tracking maps for the PF's interrupt allocations. 7996 * 7997 * Unlike the scheme for queues, this is done in one step since both the 7998 * manager and the maps both have the same lifetime. 7999 * 8000 * @returns 0 on success, or an error code on failure. 
8001 */ 8002 int 8003 ice_alloc_intr_tracking(struct ice_softc *sc) 8004 { 8005 struct ice_hw *hw = &sc->hw; 8006 device_t dev = sc->dev; 8007 int err; 8008 8009 /* Initialize the interrupt allocation manager */ 8010 err = ice_resmgr_init_contig_only(&sc->imgr, 8011 hw->func_caps.common_cap.num_msix_vectors); 8012 if (err) { 8013 device_printf(dev, "Unable to initialize PF interrupt manager: %s\n", 8014 ice_err_str(err)); 8015 return (err); 8016 } 8017 8018 /* Allocate PF interrupt mapping storage */ 8019 if (!(sc->pf_imap = 8020 (u16 *)malloc(sizeof(u16) * hw->func_caps.common_cap.num_msix_vectors, 8021 M_ICE, M_NOWAIT))) { 8022 device_printf(dev, "Unable to allocate PF imap memory\n"); 8023 err = ENOMEM; 8024 goto free_imgr; 8025 } 8026 for (u32 i = 0; i < hw->func_caps.common_cap.num_msix_vectors; i++) { 8027 sc->pf_imap[i] = ICE_INVALID_RES_IDX; 8028 } 8029 8030 return (0); 8031 8032 free_imgr: 8033 ice_resmgr_destroy(&sc->imgr); 8034 return (err); 8035 } 8036 8037 /** 8038 * ice_free_intr_tracking - Free PF interrupt tracking structures 8039 * @sc: device softc structure 8040 * 8041 * Frees the interrupt resource allocation manager and the PF's owned maps. 8042 * 8043 * VF maps are released when the owning VF's are destroyed, which should always 8044 * happen before this function is called. 8045 */ 8046 void 8047 ice_free_intr_tracking(struct ice_softc *sc) 8048 { 8049 if (sc->pf_imap) { 8050 ice_resmgr_release_map(&sc->imgr, sc->pf_imap, 8051 sc->lan_vectors); 8052 free(sc->pf_imap, M_ICE); 8053 sc->pf_imap = NULL; 8054 } 8055 8056 ice_resmgr_destroy(&sc->imgr); 8057 } 8058 8059 /** 8060 * ice_apply_supported_speed_filter - Mask off unsupported speeds 8061 * @phy_type_low: bit-field for the low quad word of PHY types 8062 * @phy_type_high: bit-field for the high quad word of PHY types 8063 * 8064 * Given the two quad words containing the supported PHY types, 8065 * this function will mask off the speeds that are not currently 8066 * supported by the device. 8067 */ 8068 static void 8069 ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high) 8070 { 8071 u64 phylow_mask; 8072 8073 /* We won't offer anything lower than 1G for any part, 8074 * but we also won't offer anything under 25G for 100G 8075 * parts. 8076 */ 8077 phylow_mask = ~(ICE_PHY_TYPE_LOW_1000BASE_T - 1); 8078 if (*phy_type_high || 8079 *phy_type_low & ~(ICE_PHY_TYPE_LOW_100GBASE_CR4 - 1)) 8080 phylow_mask = ~(ICE_PHY_TYPE_LOW_25GBASE_T - 1); 8081 *phy_type_low &= phylow_mask; 8082 } 8083 8084 /** 8085 * ice_get_phy_types - Report appropriate PHY types 8086 * @sc: device softc structure 8087 * @phy_type_low: bit-field for the low quad word of PHY types 8088 * @phy_type_high: bit-field for the high quad word of PHY types 8089 * 8090 * Populate the two quad words with bits representing the PHY types 8091 * supported by the device. This is really just a wrapper around 8092 * the ice_aq_get_phy_caps() that chooses the appropriate report 8093 * mode (lenient or strict) and reports back only the relevant PHY 8094 * types. In lenient mode the capabilities are retrieved with the 8095 * NVM_CAP report mode, otherwise they're retrieved using the 8096 * TOPO_CAP report mode (NVM intersected with current media). 8097 * 8098 * @returns 0 on success, or an error code on failure. 
 */
static enum ice_status
ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high)
{
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	struct ice_port_info *pi = sc->hw.port_info;
	device_t dev = sc->dev;
	enum ice_status status;
	u8 report_mode;

	if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE))
		report_mode = ICE_AQC_REPORT_NVM_CAP;
	else
		report_mode = ICE_AQC_REPORT_TOPO_CAP;
	status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL);
	if (status != ICE_SUCCESS) {
		device_printf(dev,
		    "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n",
		    __func__,
		    (report_mode == ICE_AQC_REPORT_TOPO_CAP) ? "TOPO_CAP" : "NVM_CAP",
		    ice_status_str(status),
		    ice_aq_str(sc->hw.adminq.sq_last_status));
		return (status);
	}

	*phy_type_low = le64toh(pcaps.phy_type_low);
	*phy_type_high = le64toh(pcaps.phy_type_high);

	return (ICE_SUCCESS);
}

/**
 * ice_set_default_local_lldp_mib - Set Local LLDP MIB to default settings
 * @sc: device softc structure
 *
 * This function needs to be called after link up; it makes sure the FW
 * has certain PFC/DCB settings. This is intended to work around a FW behavior
 * where these settings seem to be cleared on link up.
 */
void
ice_set_default_local_lldp_mib(struct ice_softc *sc)
{
	struct ice_dcbx_cfg *dcbcfg;
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi;
	device_t dev = sc->dev;
	enum ice_status status;

	pi = hw->port_info;
	dcbcfg = &pi->qos_cfg.local_dcbx_cfg;

	/* This value is only 3 bits; 8 TCs maps to 0 */
	u8 maxtcs = hw->func_caps.common_cap.maxtc & ICE_IEEE_ETS_MAXTC_M;

	/**
	 * Setup the default settings used by the driver for the Set Local
	 * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no
	 * PFC).
	 */
	memset(dcbcfg, 0, sizeof(*dcbcfg));
	dcbcfg->etscfg.willing = 1;
	dcbcfg->etscfg.tcbwtable[0] = 100;
	dcbcfg->etscfg.maxtcs = maxtcs;
	dcbcfg->etsrec.willing = 1;
	dcbcfg->etsrec.tcbwtable[0] = 100;
	dcbcfg->etsrec.maxtcs = maxtcs;
	dcbcfg->pfc.willing = 1;
	dcbcfg->pfc.pfccap = maxtcs;

	status = ice_set_dcb_cfg(pi);

	if (status)
		device_printf(dev,
		    "Error setting Local LLDP MIB: %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
}