/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

/**
 * @file ice_lib.c
 * @brief Generic device setup and sysctl functions
 *
 * Library of generic device functions not specific to the networking stack.
 *
 * This includes hardware initialization functions, as well as handlers for
 * many of the device sysctls used to probe driver status or tune specific
 * behaviors.
 */

#include "ice_lib.h"
#include "ice_iflib.h"
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/resource.h>
#include <net/if_dl.h>
#include <sys/firmware.h>
#include <sys/priv.h>

/**
 * @var M_ICE
 * @brief main ice driver allocation type
 *
 * malloc(9) allocation type used by the majority of memory allocations in the
 * ice driver.
 */
MALLOC_DEFINE(M_ICE, "ice", "Intel(R) 100Gb Network Driver lib allocations");

/*
 * Helper function prototypes
 */
static int ice_get_next_vsi(struct ice_vsi **all_vsi, int size);
static void ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx);
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type);
static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx);
static int ice_setup_tx_ctx(struct ice_tx_queue *txq,
    struct ice_tlan_ctx *tlan_ctx, u16 pf_q);
static int ice_setup_rx_ctx(struct ice_rx_queue *rxq);
static int ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg);
static void ice_free_fltr_list(struct ice_list_head *list);
static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list,
    const u8 *addr, enum ice_sw_fwd_act_type action);
static void ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname,
    struct ice_ctl_q_info *cq);
static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_info *e);
static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname,
    struct ice_rq_event_info *event);
static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf);
static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf);
static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf);
static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info);
static u_int ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, u_int errors);
static void ice_add_debug_tunables(struct ice_softc *sc);
static void ice_add_debug_sysctls(struct ice_softc *sc);
static void ice_vsi_set_rss_params(struct ice_vsi *vsi);
static void ice_get_default_rss_key(u8 *seed);
static int ice_set_rss_key(struct ice_vsi *vsi);
static int ice_set_rss_lut(struct ice_vsi *vsi);
static void ice_set_rss_flow_flds(struct ice_vsi *vsi);
static void ice_clean_vsi_rss_cfg(struct ice_vsi *vsi);
static const char *ice_aq_speed_to_str(struct ice_port_info *pi);
static const char *ice_requested_fec_mode(struct ice_port_info *pi);
static const char *ice_negotiated_fec_mode(struct ice_port_info *pi);
static const char *ice_autoneg_mode(struct ice_port_info *pi);
static const char *ice_flowcontrol_mode(struct ice_port_info *pi);
static void ice_print_bus_link_data(device_t dev, struct ice_hw *hw);
static void ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status);
static uint8_t ice_pcie_bandwidth_check(struct ice_softc *sc);
static uint64_t ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed);
static int ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width);
static uint64_t ice_phy_types_to_max_rate(struct ice_port_info *pi);
static void ice_add_sysctls_sw_stats(struct ice_vsi *vsi,
    struct sysctl_ctx_list *ctx,
    struct sysctl_oid *parent);
static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi,
    enum ice_vsi_type type, int idx,
    bool dynamic);
static void ice_handle_mib_change_event(struct ice_softc *sc,
    struct ice_rq_event_info *event);
static void
ice_handle_lan_overflow_event(struct ice_softc *sc,
    struct ice_rq_event_info *event);
static int ice_add_ethertype_to_list(struct ice_vsi *vsi,
    struct ice_list_head *list,
    u16 ethertype, u16 direction,
    enum ice_sw_fwd_act_type action);
static void ice_add_rx_lldp_filter(struct ice_softc *sc);
static void ice_del_rx_lldp_filter(struct ice_softc *sc);
static u16 ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low,
    u64 phy_type_high);
static void
ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi,
    struct ice_aqc_get_phy_caps_data *pcaps,
    struct ice_aqc_set_phy_cfg_data *cfg);
static void
ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi,
    struct ice_aqc_get_phy_caps_data *pcaps,
    struct ice_aqc_set_phy_cfg_data *cfg);
static void
ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi,
    struct ice_aqc_get_phy_caps_data *pcaps,
    struct ice_aqc_set_phy_cfg_data *cfg);
static void
ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi,
    struct ice_aqc_set_phy_cfg_data *cfg);
static void
ice_print_ldo_tlv(struct ice_softc *sc,
    struct ice_link_default_override_tlv *tlv);
static void
ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
    u64 *phy_type_high);
static int
ice_intersect_media_types_with_caps(struct ice_softc *sc, u64 *phy_type_low,
    u64 *phy_type_high);
static int
ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low,
    u64 *phy_type_high);
static void
ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high);
static enum ice_status
ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high);

static int ice_module_init(void);
static int ice_module_exit(void);

/*
 * package version comparison functions
 */
static bool pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name);
static int pkg_ver_compatible(struct ice_pkg_ver *pkg_ver);

/*
 * dynamic sysctl handlers
 */
static int ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS);
static int __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS,
    bool is_phy_type_high);
static int ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode);
static int ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS);

/**
 * ice_map_bar - Map PCIe BAR memory
 * @dev: the PCIe device
 * @bar: the BAR info structure
 * @bar_num: PCIe BAR number
 *
 * Maps the specified PCIe BAR. Stores the mapping data in struct
 * ice_bar_info.
 */
int
ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num)
{
	if (bar->res != NULL) {
		device_printf(dev, "PCI BAR%d already mapped\n", bar_num);
		return (EDOOFUS);
	}

	bar->rid = PCIR_BAR(bar_num);
	bar->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar->rid,
	    RF_ACTIVE);
	if (!bar->res) {
		device_printf(dev, "PCI BAR%d mapping failed\n", bar_num);
		return (ENXIO);
	}

	bar->tag = rman_get_bustag(bar->res);
	bar->handle = rman_get_bushandle(bar->res);
	bar->size = rman_get_size(bar->res);

	return (0);
}

/**
 * ice_free_bar - Free PCIe BAR memory
 * @dev: the PCIe device
 * @bar: the BAR info structure
 *
 * Frees the specified PCIe BAR, releasing its resources.
 */
void
ice_free_bar(device_t dev, struct ice_bar_info *bar)
{
	if (bar->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, bar->rid, bar->res);
	bar->res = NULL;
}

/**
 * ice_set_ctrlq_len - Configure ctrlq lengths for a device
 * @hw: the device hardware structure
 *
 * Configures the control queues for the given device, setting up the
 * specified lengths, prior to initializing hardware.
 */
void
ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;

	hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}

/**
 * ice_get_next_vsi - Get the next available VSI slot
 * @all_vsi: the VSI list
 * @size: the size of the VSI list
 *
 * Returns the index to the first available VSI slot. Will return size (one
 * past the last index) if there are no slots available.
 */
static int
ice_get_next_vsi(struct ice_vsi **all_vsi, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		if (all_vsi[i] == NULL)
			return i;
	}

	return size;
}

/**
 * ice_setup_vsi_common - Common VSI setup for both dynamic and static VSIs
 * @sc: the device private softc structure
 * @vsi: the VSI to setup
 * @type: the VSI type of the new VSI
 * @idx: the index in the all_vsi array to use
 * @dynamic: whether this VSI memory was dynamically allocated
 *
 * Perform setup for a VSI that is common to both dynamically allocated VSIs
 * and the static PF VSI which is embedded in the softc structure.
 */
static void
ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi,
    enum ice_vsi_type type, int idx, bool dynamic)
{
	/* Store important values in VSI struct */
	vsi->type = type;
	vsi->sc = sc;
	vsi->idx = idx;
	sc->all_vsi[idx] = vsi;
	vsi->dynamic = dynamic;

	/* Setup the VSI tunables now */
	ice_add_vsi_tunables(vsi, sc->vsi_sysctls);
}

/**
 * ice_alloc_vsi - Allocate a dynamic VSI
 * @sc: device softc structure
 * @type: VSI type
 *
 * Allocates a new dynamic VSI structure and inserts it into the VSI list.
 */
struct ice_vsi *
ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type)
{
	struct ice_vsi *vsi;
	int idx;

	/* Find an open index for a new VSI to be allocated. If the returned
	 * index is >= the num_available_vsi then it means no slot is
	 * available.
	 */
	idx = ice_get_next_vsi(sc->all_vsi, sc->num_available_vsi);
	if (idx >= sc->num_available_vsi) {
		device_printf(sc->dev, "No available VSI slots\n");
		return NULL;
	}

	vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_WAITOK|M_ZERO);
	if (!vsi) {
		device_printf(sc->dev, "Unable to allocate VSI memory\n");
		return NULL;
	}

	ice_setup_vsi_common(sc, vsi, type, idx, true);

	return vsi;
}

/**
 * ice_setup_pf_vsi - Setup the PF VSI
 * @sc: the device private softc
 *
 * Setup the PF VSI structure which is embedded as sc->pf_vsi in the device
 * private softc. Unlike other VSIs, the PF VSI memory is allocated as part of
 * the softc memory, instead of being dynamically allocated at creation.
 */
void
ice_setup_pf_vsi(struct ice_softc *sc)
{
	ice_setup_vsi_common(sc, &sc->pf_vsi, ICE_VSI_PF, 0, false);
}

/**
 * ice_alloc_vsi_qmap
 * @vsi: VSI structure
 * @max_tx_queues: Number of transmit queues to identify
 * @max_rx_queues: Number of receive queues to identify
 *
 * Allocates a max_[t|r]x_queues array of words for the VSI where each
 * word contains the index of the queue it represents. In here, all
 * words are initialized to an index of ICE_INVALID_RES_IDX, indicating
 * all queues for this VSI are not yet assigned an index and thus,
 * not ready for use.
 *
 * Returns an error code on failure.
 */
int
ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
    const int max_rx_queues)
{
	struct ice_softc *sc = vsi->sc;
	int i;

	MPASS(max_tx_queues > 0);
	MPASS(max_rx_queues > 0);

	/* Allocate Tx queue mapping memory */
	if (!(vsi->tx_qmap =
	    (u16 *) malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK))) {
		device_printf(sc->dev, "Unable to allocate Tx qmap memory\n");
		return (ENOMEM);
	}

	/* Allocate Rx queue mapping memory */
	if (!(vsi->rx_qmap =
	    (u16 *) malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK))) {
		device_printf(sc->dev, "Unable to allocate Rx qmap memory\n");
		goto free_tx_qmap;
	}

	/* Mark every queue map as invalid to start with */
	for (i = 0; i < max_tx_queues; i++) {
		vsi->tx_qmap[i] = ICE_INVALID_RES_IDX;
	}
	for (i = 0; i < max_rx_queues; i++) {
		vsi->rx_qmap[i] = ICE_INVALID_RES_IDX;
	}

	return 0;

free_tx_qmap:
	free(vsi->tx_qmap, M_ICE);
	vsi->tx_qmap = NULL;

	return (ENOMEM);
}

/**
 * ice_free_vsi_qmaps - Free the PF qmaps associated with a VSI
 * @vsi: the VSI private structure
 *
 * Frees the PF qmaps associated with the given VSI. Generally this will be
 * called by ice_release_vsi, but may need to be called during attach cleanup,
 * depending on when the qmaps were allocated.
 */
void
ice_free_vsi_qmaps(struct ice_vsi *vsi)
{
	struct ice_softc *sc = vsi->sc;

	if (vsi->tx_qmap) {
		ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap,
		    vsi->num_tx_queues);
		free(vsi->tx_qmap, M_ICE);
		vsi->tx_qmap = NULL;
	}

	if (vsi->rx_qmap) {
		ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap,
		    vsi->num_rx_queues);
		free(vsi->rx_qmap, M_ICE);
		vsi->rx_qmap = NULL;
	}
}

/**
 * ice_set_default_vsi_ctx - Setup default VSI context parameters
 * @ctx: the VSI context to initialize
 *
 * Initialize and prepare a default VSI context for configuring a new VSI.
 */
static void
ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx)
{
	u32 table = 0;

	memset(&ctx->info, 0, sizeof(ctx->info));
	/* VSI will be allocated from shared pool */
	ctx->alloc_from_pool = true;
	/* Enable source pruning by default */
	ctx->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctx->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* Allow all packets untagged/tagged */
	ctx->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
	    ICE_AQ_VSI_VLAN_MODE_M) >>
	    ICE_AQ_VSI_VLAN_MODE_S);
	/* Show VLAN/UP from packets in Rx descriptors */
	ctx->info.vlan_flags |= ((ICE_AQ_VSI_VLAN_EMOD_STR_BOTH &
	    ICE_AQ_VSI_VLAN_EMOD_M) >>
	    ICE_AQ_VSI_VLAN_EMOD_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctx->info.ingress_table = CPU_TO_LE32(table);
	ctx->info.egress_table = CPU_TO_LE32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctx->info.outer_up_table = CPU_TO_LE32(table);
	/* No Outer tag support, so outer_tag_flags remains zero */
}

/**
 * ice_set_rss_vsi_ctx - Setup VSI context parameters for RSS
 * @ctx: the VSI context to configure
 * @type: the VSI type
 *
 * Configures the VSI context for RSS, based on the VSI type.
 */
static void
ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type)
{
	u8 lut_type, hash_type;

	switch (type) {
	case ICE_VSI_PF:
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		/* Other VSI types do not support RSS */
		return;
	}

	ctx->info.q_opt_rss = (((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
	    ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
	    ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
	    ICE_AQ_VSI_Q_OPT_RSS_HASH_M));
}

/**
 * ice_setup_vsi_qmap - Setup the queue mapping for a VSI
 * @vsi: the VSI to configure
 * @ctx: the VSI context to configure
 *
 * Configures the context for the given VSI, setting up how the firmware
 * should map the queues for this VSI.
 */
static int
ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
	int pow = 0;
	u16 qmap;

	MPASS(vsi->rx_qmap != NULL);

	/* TODO:
	 * Handle multiple Traffic Classes
	 * Handle scattered queues (for VFs)
	 */
	if (vsi->qmap_type != ICE_RESMGR_ALLOC_CONTIGUOUS)
		return (EOPNOTSUPP);

	ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG);

	ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]);
	ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues);

	/* Calculate the next power-of-2 of number of queues */
	if (vsi->num_rx_queues)
		pow = flsl(vsi->num_rx_queues - 1);

	/* Assign all the queues to traffic class zero */
	qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M;
	ctx->info.tc_mapping[0] = CPU_TO_LE16(qmap);

	return 0;
}

/**
 * ice_initialize_vsi - Initialize a VSI for use
 * @vsi: the vsi to initialize
 *
 * Initialize a VSI over the adminq and prepare it for operation.
 */
int
ice_initialize_vsi(struct ice_vsi *vsi)
{
	struct ice_vsi_ctx ctx = { 0 };
	struct ice_hw *hw = &vsi->sc->hw;
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	enum ice_status status;
	int err;

	/* For now, we only have code supporting PF VSIs */
	switch (vsi->type) {
	case ICE_VSI_PF:
		ctx.flags = ICE_AQ_VSI_TYPE_PF;
		break;
	default:
		return (ENODEV);
	}

	ice_set_default_vsi_ctx(&ctx);
	ice_set_rss_vsi_ctx(&ctx, vsi->type);

	/* XXX: VSIs of other types may need different port info? */
	ctx.info.sw_id = hw->port_info->sw_id;

	/* Set some RSS parameters based on the VSI type */
	ice_vsi_set_rss_params(vsi);

	/* Initialize the Rx queue mapping for this VSI */
	err = ice_setup_vsi_qmap(vsi, &ctx);
	if (err) {
		return err;
	}

	/* (Re-)add VSI to HW VSI handle list */
	status = ice_add_vsi(hw, vsi->idx, &ctx, NULL);
	if (status != 0) {
		device_printf(vsi->sc->dev,
		    "Add VSI AQ call failed, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}
	vsi->info = ctx.info;

	/* TODO: DCB traffic class support? */
	max_txqs[0] = vsi->num_tx_queues;

	status = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
	    ICE_DFLT_TRAFFIC_CLASS, max_txqs);
	if (status) {
		device_printf(vsi->sc->dev,
		    "Failed VSI lan queue config, err %s aq_err %s\n",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		ice_deinit_vsi(vsi);
		return (ENODEV);
	}

	/* Reset VSI stats */
	ice_reset_vsi_stats(vsi);

	return 0;
}

/**
 * ice_deinit_vsi - Tell firmware to release resources for a VSI
 * @vsi: the VSI to release
 *
 * Helper function which requests the firmware to release the hardware
 * resources associated with a given VSI.
 */
void
ice_deinit_vsi(struct ice_vsi *vsi)
{
	struct ice_vsi_ctx ctx = { 0 };
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;
	enum ice_status status;

	/* Assert that the VSI pointer matches in the list */
	MPASS(vsi == sc->all_vsi[vsi->idx]);

	ctx.info = vsi->info;

	status = ice_rm_vsi_lan_cfg(hw->port_info, vsi->idx);
	if (status) {
		/*
		 * This should only fail if the VSI handle is invalid, or if
		 * any of the nodes have leaf nodes which are still in use.
		 */
		device_printf(sc->dev,
		    "Unable to remove scheduler nodes for VSI %d, err %s\n",
		    vsi->idx, ice_status_str(status));
	}

	/* Tell firmware to release the VSI resources */
	status = ice_free_vsi(hw, vsi->idx, &ctx, false, NULL);
	if (status != 0) {
		device_printf(sc->dev,
		    "Free VSI %u AQ call failed, err %s aq_err %s\n",
		    vsi->idx, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_release_vsi - Release resources associated with a VSI
 * @vsi: the VSI to release
 *
 * Release software and firmware resources associated with a VSI. Release the
 * queue managers associated with this VSI. Also free the VSI structure memory
 * if the VSI was allocated dynamically using ice_alloc_vsi().
 */
void
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_softc *sc = vsi->sc;
	int idx = vsi->idx;

	/* Assert that the VSI pointer matches in the list */
	MPASS(vsi == sc->all_vsi[idx]);

	/* Cleanup RSS configuration */
	if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS))
		ice_clean_vsi_rss_cfg(vsi);

	ice_del_vsi_sysctl_ctx(vsi);

	ice_deinit_vsi(vsi);

	ice_free_vsi_qmaps(vsi);

	if (vsi->dynamic) {
		free(sc->all_vsi[idx], M_ICE);
	}

	sc->all_vsi[idx] = NULL;
}

/**
 * ice_aq_speed_to_rate - Convert AdminQ speed enum to baudrate
 * @pi: port info data
 *
 * Returns the baudrate value for the current link speed of a given port.
 */
uint64_t
ice_aq_speed_to_rate(struct ice_port_info *pi)
{
	switch (pi->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		return IF_Gbps(100);
	case ICE_AQ_LINK_SPEED_50GB:
		return IF_Gbps(50);
	case ICE_AQ_LINK_SPEED_40GB:
		return IF_Gbps(40);
	case ICE_AQ_LINK_SPEED_25GB:
		return IF_Gbps(25);
	case ICE_AQ_LINK_SPEED_10GB:
		return IF_Gbps(10);
	case ICE_AQ_LINK_SPEED_5GB:
		return IF_Gbps(5);
	case ICE_AQ_LINK_SPEED_2500MB:
		return IF_Mbps(2500);
	case ICE_AQ_LINK_SPEED_1000MB:
		return IF_Mbps(1000);
	case ICE_AQ_LINK_SPEED_100MB:
		return IF_Mbps(100);
	case ICE_AQ_LINK_SPEED_10MB:
		return IF_Mbps(10);
	case ICE_AQ_LINK_SPEED_UNKNOWN:
	default:
		/* return 0 if we don't know the link speed */
		return 0;
	}
}

/**
 * ice_aq_speed_to_str - Convert AdminQ speed enum to string representation
 * @pi: port info data
 *
 * Returns the string representation of the current link speed for a given
 * port.
 */
static const char *
ice_aq_speed_to_str(struct ice_port_info *pi)
{
	switch (pi->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		return "100 Gbps";
	case ICE_AQ_LINK_SPEED_50GB:
		return "50 Gbps";
	case ICE_AQ_LINK_SPEED_40GB:
		return "40 Gbps";
	case ICE_AQ_LINK_SPEED_25GB:
		return "25 Gbps";
	case ICE_AQ_LINK_SPEED_20GB:
		return "20 Gbps";
	case ICE_AQ_LINK_SPEED_10GB:
		return "10 Gbps";
	case ICE_AQ_LINK_SPEED_5GB:
		return "5 Gbps";
	case ICE_AQ_LINK_SPEED_2500MB:
		return "2.5 Gbps";
	case ICE_AQ_LINK_SPEED_1000MB:
		return "1 Gbps";
	case ICE_AQ_LINK_SPEED_100MB:
		return "100 Mbps";
	case ICE_AQ_LINK_SPEED_10MB:
		return "10 Mbps";
	case ICE_AQ_LINK_SPEED_UNKNOWN:
	default:
		return "Unknown speed";
	}
}

/**
 * ice_get_phy_type_low - Get media associated with phy_type_low
 * @phy_type_low: the low 64bits of phy_type from the AdminQ
 *
 * Given the lower 64bits of the phy_type from the hardware, return the
 * ifm_active bit associated. Return IFM_UNKNOWN when phy_type_low is unknown.
 * Note that only one of ice_get_phy_type_low or ice_get_phy_type_high should
 * be called. If phy_type_low is zero, call ice_get_phy_type_high.
 */
int
ice_get_phy_type_low(uint64_t phy_type_low)
{
	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
		return IFM_100_TX;
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		return IFM_100_SGMII;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
		return IFM_1000_T;
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
		return IFM_1000_SX;
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
		return IFM_1000_LX;
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
		return IFM_1000_KX;
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		return IFM_1000_SGMII;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
		return IFM_2500_T;
	case ICE_PHY_TYPE_LOW_2500BASE_X:
		return IFM_2500_X;
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		return IFM_2500_KX;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
		return IFM_5000_T;
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		return IFM_5000_KR;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
		return IFM_10G_T;
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		return IFM_10G_TWINAX;
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
		return IFM_10G_SR;
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
		return IFM_10G_LR;
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		return IFM_10G_KR;
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		return IFM_10G_AOC;
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		return IFM_10G_SFI;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
		return IFM_25G_T;
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
		return IFM_25G_CR;
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		return IFM_25G_CR_S;
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		return IFM_25G_CR1;
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
		return IFM_25G_SR;
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
		return IFM_25G_LR;
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
		return IFM_25G_KR;
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		return IFM_25G_KR_S;
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		return IFM_25G_KR1;
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		return IFM_25G_AOC;
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		return IFM_25G_AUI;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		return IFM_40G_CR4;
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		return IFM_40G_SR4;
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		return IFM_40G_LR4;
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		return IFM_40G_KR4;
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		return IFM_40G_XLAUI_AC;
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		return IFM_40G_XLAUI;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		return IFM_50G_CR2;
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		return IFM_50G_SR2;
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		return IFM_50G_LR2;
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		return IFM_50G_KR2;
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		return IFM_50G_LAUI2_AC;
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
		return IFM_50G_LAUI2;
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		return IFM_50G_AUI2_AC;
	case ICE_PHY_TYPE_LOW_50G_AUI2:
		return IFM_50G_AUI2;
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
		return IFM_50G_CP;
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
		return IFM_50G_SR;
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
		return IFM_50G_FR;
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
		return IFM_50G_LR;
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		return IFM_50G_KR_PAM4;
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		return IFM_50G_AUI1_AC;
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		return IFM_50G_AUI1;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		return IFM_100G_CR4;
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		return IFM_100G_SR4;
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		return IFM_100G_LR4;
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		return IFM_100G_KR4;
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		return IFM_100G_CAUI4_AC;
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
		return IFM_100G_CAUI4;
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
		return IFM_100G_AUI4_AC;
	case ICE_PHY_TYPE_LOW_100G_AUI4:
		return IFM_100G_AUI4;
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		return IFM_100G_CR_PAM4;
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
		return IFM_100G_KR_PAM4;
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
		return IFM_100G_CP2;
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		return IFM_100G_SR2;
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		return IFM_100G_DR;
	default:
		return IFM_UNKNOWN;
	}
}

/**
 * ice_get_phy_type_high - Get media associated with phy_type_high
 * @phy_type_high: the upper 64bits of phy_type from the AdminQ
 *
 * Given the upper 64bits of the phy_type from the hardware, return the
 * ifm_active bit associated. Return IFM_UNKNOWN on an unknown value. Note
 * that only one of ice_get_phy_type_low or ice_get_phy_type_high should be
 * called. If phy_type_high is zero, call ice_get_phy_type_low.
 */
int
ice_get_phy_type_high(uint64_t phy_type_high)
{
	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
		return IFM_100G_KR2_PAM4;
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		return IFM_100G_CAUI2_AC;
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
		return IFM_100G_CAUI2;
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
		return IFM_100G_AUI2_AC;
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		return IFM_100G_AUI2;
	default:
		return IFM_UNKNOWN;
	}
}

/**
 * ice_phy_types_to_max_rate - Returns port's max supported baudrate
 * @pi: port info struct
 *
 * ice_aq_get_phy_caps() w/ ICE_AQC_REPORT_TOPO_CAP parameter needs to have
 * been called before this function for it to work.
 */
static uint64_t
ice_phy_types_to_max_rate(struct ice_port_info *pi)
{
	uint64_t phy_low = pi->phy.phy_type_low;
	uint64_t phy_high = pi->phy.phy_type_high;
	uint64_t max_rate = 0;
	int bit;

	/*
	 * These are based on the indices used in the BIT() macros for
	 * ICE_PHY_TYPE_LOW_*
	 */
	static const uint64_t phy_rates[] = {
	    IF_Mbps(100),
	    IF_Mbps(100),
	    IF_Gbps(1ULL),
	    IF_Gbps(1ULL),
	    IF_Gbps(1ULL),
	    IF_Gbps(1ULL),
	    IF_Gbps(1ULL),
	    IF_Mbps(2500ULL),
	    IF_Mbps(2500ULL),
	    IF_Mbps(2500ULL),
	    IF_Gbps(5ULL),
	    IF_Gbps(5ULL),
	    IF_Gbps(10ULL),
	    IF_Gbps(10ULL),
	    IF_Gbps(10ULL),
	    IF_Gbps(10ULL),
	    IF_Gbps(10ULL),
	    IF_Gbps(10ULL),
	    IF_Gbps(10ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(25ULL),
	    IF_Gbps(40ULL),
	    IF_Gbps(40ULL),
	    IF_Gbps(40ULL),
	    IF_Gbps(40ULL),
	    IF_Gbps(40ULL),
	    IF_Gbps(40ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(50ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    /* These rates are for ICE_PHY_TYPE_HIGH_* */
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL),
	    IF_Gbps(100ULL)
	};

	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_high, 64)
		if ((bit + 64) < (int)ARRAY_SIZE(phy_rates))
			max_rate = uqmax(max_rate, phy_rates[(bit + 64)]);

	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_low, 64)
		max_rate = uqmax(max_rate, phy_rates[bit]);

	return (max_rate);
}

/* The if_media type is split over the original 5 bit media variant field,
 * along with extended types using up extra bits in the options section.
 * We want to convert this split number into a bitmap index, so we reverse the
 * calculation of IFM_X here.
 */
#define IFM_IDX(x) (((x) & IFM_TMASK) | \
    (((x) & IFM_ETH_XTYPE) >> IFM_ETH_XSHIFT))

/**
 * ice_add_media_types - Add supported media types to the media structure
 * @sc: ice private softc structure
 * @media: ifmedia structure to setup
 *
 * Looks up the supported phy types, and initializes the various media types
 * available.
 *
 * @pre this function must be protected from being called while another thread
 * is accessing the ifmedia types.
 */
enum ice_status
ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
{
	enum ice_status status;
	uint64_t phy_low, phy_high;
	int bit;

	ASSERT_CFG_LOCKED(sc);

	/* the maximum possible media type index is 511. We probably don't
	 * need most of this space, but this ensures future compatibility when
	 * additional media types are used.
	 */
	ice_declare_bitmap(already_added, 511);

	/* Remove all previous media types */
	ifmedia_removeall(media);

	status = ice_get_phy_types(sc, &phy_low, &phy_high);
	if (status != ICE_SUCCESS) {
		/* Function already prints appropriate error
		 * message
		 */
		return (status);
	}

	/* make sure the added bitmap is zero'd */
	memset(already_added, 0, sizeof(already_added));

	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_low, 64) {
		uint64_t type = BIT_ULL(bit);
		int ostype;

		/* get the OS media type */
		ostype = ice_get_phy_type_low(type);

		/* don't bother adding the unknown type */
		if (ostype == IFM_UNKNOWN)
			continue;

		/* only add each media type to the list once */
		if (ice_is_bit_set(already_added, IFM_IDX(ostype)))
			continue;

		ifmedia_add(media, IFM_ETHER | ostype, 0, NULL);
		ice_set_bit(IFM_IDX(ostype), already_added);
	}

	/* coverity[address_of] */
	for_each_set_bit(bit, &phy_high, 64) {
		uint64_t type = BIT_ULL(bit);
		int ostype;

		/* get the OS media type */
		ostype = ice_get_phy_type_high(type);

		/* don't bother adding the unknown type */
		if (ostype == IFM_UNKNOWN)
			continue;

		/* only add each media type to the list once */
		if (ice_is_bit_set(already_added, IFM_IDX(ostype)))
			continue;

		ifmedia_add(media, IFM_ETHER | ostype, 0, NULL);
		ice_set_bit(IFM_IDX(ostype), already_added);
	}

	/* Use autoselect media by default */
	ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(media, IFM_ETHER | IFM_AUTO);

	return (ICE_SUCCESS);
}

/**
 * ice_configure_rxq_interrupts - Configure HW Rx queues for MSI-X interrupts
 * @vsi: the VSI to configure
 *
 * Called when setting up MSI-X interrupts to configure the Rx hardware queues.
 */
void
ice_configure_rxq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_rx_queues; i++) {
		struct ice_rx_queue *rxq = &vsi->rx_queues[i];
		u32 val;

		val = (QINT_RQCTL_CAUSE_ENA_M |
		    (ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) |
		    (rxq->irqv->me << QINT_RQCTL_MSIX_INDX_S));
		wr32(hw, QINT_RQCTL(vsi->rx_qmap[rxq->me]), val);
	}

	ice_flush(hw);
}

/**
 * ice_configure_txq_interrupts - Configure HW Tx queues for MSI-X interrupts
 * @vsi: the VSI to configure
 *
 * Called when setting up MSI-X interrupts to configure the Tx hardware queues.
 */
void
ice_configure_txq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tx_queue *txq = &vsi->tx_queues[i];
		u32 val;

		val = (QINT_TQCTL_CAUSE_ENA_M |
		    (ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) |
		    (txq->irqv->me << QINT_TQCTL_MSIX_INDX_S));
		wr32(hw, QINT_TQCTL(vsi->tx_qmap[txq->me]), val);
	}

	ice_flush(hw);
}

/**
 * ice_flush_rxq_interrupts - Unconfigure Hw Rx queues MSI-X interrupt cause
 * @vsi: the VSI to configure
 *
 * Unset the CAUSE_ENA flag of the RQCTL register for each queue, then trigger
 * a software interrupt on that cause. This is required as part of the Rx
 * queue disable logic to dissociate the Rx queue from the interrupt.
 *
 * Note: this function must be called prior to disabling Rx queues with
 * ice_control_rx_queues, otherwise the Rx queue may not be disabled properly.
 */
void
ice_flush_rxq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_rx_queues; i++) {
		struct ice_rx_queue *rxq = &vsi->rx_queues[i];
		u32 reg, val;

		/* Clear the CAUSE_ENA flag */
		reg = vsi->rx_qmap[rxq->me];
		val = rd32(hw, QINT_RQCTL(reg));
		val &= ~QINT_RQCTL_CAUSE_ENA_M;
		wr32(hw, QINT_RQCTL(reg), val);

		ice_flush(hw);

		/* Trigger a software interrupt to complete interrupt
		 * dissociation.
		 */
		wr32(hw, GLINT_DYN_CTL(rxq->irqv->me),
		    GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
	}
}

/**
 * ice_flush_txq_interrupts - Unconfigure Hw Tx queues MSI-X interrupt cause
 * @vsi: the VSI to configure
 *
 * Unset the CAUSE_ENA flag of the TQCTL register for each queue, then trigger
 * a software interrupt on that cause. This is required as part of the Tx
 * queue disable logic to dissociate the Tx queue from the interrupt.
 *
 * Note: this function must be called prior to ice_vsi_disable_tx, otherwise
 * the Tx queue disable may not complete properly.
 */
void
ice_flush_txq_interrupts(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tx_queue *txq = &vsi->tx_queues[i];
		u32 reg, val;

		/* Clear the CAUSE_ENA flag */
		reg = vsi->tx_qmap[txq->me];
		val = rd32(hw, QINT_TQCTL(reg));
		val &= ~QINT_TQCTL_CAUSE_ENA_M;
		wr32(hw, QINT_TQCTL(reg), val);

		ice_flush(hw);

		/* Trigger a software interrupt to complete interrupt
		 * dissociation.
		 */
		wr32(hw, GLINT_DYN_CTL(txq->irqv->me),
		    GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
	}
}

/**
 * ice_configure_rx_itr - Configure the Rx ITR settings for this VSI
 * @vsi: the VSI to configure
 *
 * Program the hardware ITR registers with the settings for this VSI.
 */
void
ice_configure_rx_itr(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	/* TODO: Handle per-queue/per-vector ITR? */

	for (i = 0; i < vsi->num_rx_queues; i++) {
		struct ice_rx_queue *rxq = &vsi->rx_queues[i];

		wr32(hw, GLINT_ITR(ICE_RX_ITR, rxq->irqv->me),
		    ice_itr_to_reg(hw, vsi->rx_itr));
	}

	ice_flush(hw);
}

/**
 * ice_configure_tx_itr - Configure the Tx ITR settings for this VSI
 * @vsi: the VSI to configure
 *
 * Program the hardware ITR registers with the settings for this VSI.
 */
void
ice_configure_tx_itr(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->sc->hw;
	int i;

	/* TODO: Handle per-queue/per-vector ITR? */

	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tx_queue *txq = &vsi->tx_queues[i];

		wr32(hw, GLINT_ITR(ICE_TX_ITR, txq->irqv->me),
		    ice_itr_to_reg(hw, vsi->tx_itr));
	}

	ice_flush(hw);
}

/**
 * ice_setup_tx_ctx - Setup an ice_tlan_ctx structure for a queue
 * @txq: the Tx queue to configure
 * @tlan_ctx: the Tx LAN queue context structure to initialize
 * @pf_q: real queue number
 */
static int
ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = txq->vsi;
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;

	tlan_ctx->port_num = hw->port_info->lport;

	/* number of descriptors in the queue */
	tlan_ctx->qlen = txq->desc_count;

	/* set the transmit queue base address, defined in 128 byte units */
	tlan_ctx->base = txq->tx_paddr >> 7;

	tlan_ctx->pf_num = hw->pf_id;

	/* For now, we only have code supporting PF VSIs */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	default:
		return (ENODEV);
	}

	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Enable TSO */
	tlan_ctx->tso_ena = 1;
	tlan_ctx->internal_usage_flag = 1;

	tlan_ctx->tso_qnum = pf_q;

	/*
	 * Stick with the older legacy Tx queue interface, instead of the new
	 * advanced queue interface.
	 */
	tlan_ctx->legacy_int = 1;

	/* Descriptor WB mode */
	tlan_ctx->wb_mode = 0;

	return (0);
}

/**
 * ice_cfg_vsi_for_tx - Configure the hardware for Tx
 * @vsi: the VSI to configure
 *
 * Configure the device Tx queues through firmware AdminQ commands. After
 * this, Tx queues will be ready for transmit.
 */
int
ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
{
	struct ice_aqc_add_tx_qgrp qg = { 0 };
	struct ice_hw *hw = &vsi->sc->hw;
	device_t dev = vsi->sc->dev;
	enum ice_status status;
	int i, err;
	u16 pf_q;

	qg.num_txqs = 1;

	for (i = 0; i < vsi->num_tx_queues; i++) {
		struct ice_tlan_ctx tlan_ctx = { 0 };
		struct ice_tx_queue *txq = &vsi->tx_queues[i];

		pf_q = vsi->tx_qmap[txq->me];
		qg.txqs[0].txq_id = htole16(pf_q);

		err = ice_setup_tx_ctx(txq, &tlan_ctx, pf_q);
		if (err)
			return err;

		ice_set_ctx((u8 *)&tlan_ctx, qg.txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

		status = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0,
		    i, 1, &qg, sizeof(qg), NULL);
		if (status) {
			device_printf(dev,
			    "Failed to set LAN Tx queue context, err %s aq_err %s\n",
			    ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
			return (ENODEV);
		}

		/* Keep track of the Tx queue TEID */
		if (pf_q == le16toh(qg.txqs[0].txq_id))
			txq->q_teid = le32toh(qg.txqs[0].q_teid);
	}

	return (0);
}

/**
 * ice_setup_rx_ctx - Setup an Rx context structure for a receive queue
 * @rxq: the receive queue to program
 *
 * Setup an Rx queue context structure and program it into the hardware
 * registers. This is a necessary step for enabling the Rx queue.
 *
 * @pre the VSI associated with this queue must have initialized mbuf_sz
 */
static int
ice_setup_rx_ctx(struct ice_rx_queue *rxq)
{
	struct ice_rlan_ctx rlan_ctx = {0};
	struct ice_vsi *vsi = rxq->vsi;
	struct ice_softc *sc = vsi->sc;
	struct ice_hw *hw = &sc->hw;
	enum ice_status status;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	u32 regval;
	u16 pf_q;

	pf_q = vsi->rx_qmap[rxq->me];

	/* set the receive queue base address, defined in 128 byte units */
	rlan_ctx.base = rxq->rx_paddr >> 7;

	rlan_ctx.qlen = rxq->desc_count;

	rlan_ctx.dbuf = vsi->mbuf_sz >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to the
	 * host memory.
	 */
	rlan_ctx.crcstrip = 1;

	rlan_ctx.l2tsel = 1;

	/* don't do header splitting */
	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* strip VLAN from inner headers */
	rlan_ctx.showiv = 1;

	rlan_ctx.rxmax = min(vsi->max_frame_size,
	    ICE_MAX_RX_SEGS * vsi->mbuf_sz);

	rlan_ctx.lrxqthresh = 1;

	if (vsi->type != ICE_VSI_VF) {
		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
		regval &= ~QRXFLXP_CNTXT_RXDID_IDX_M;
		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
		    QRXFLXP_CNTXT_RXDID_IDX_M;

		regval &= ~QRXFLXP_CNTXT_RXDID_PRIO_M;
		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
		    QRXFLXP_CNTXT_RXDID_PRIO_M;

		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
	}

	status = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (status) {
		device_printf(sc->dev,
		    "Failed to set LAN Rx queue context, err %s aq_err %s\n",
		    ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	wr32(hw, rxq->tail, 0);

	return 0;
}

/**
 * ice_cfg_vsi_for_rx - Configure the hardware for Rx
 * @vsi: the VSI to configure
 *
 * Prepare an Rx context descriptor and configure the device to receive
 * traffic.
 *
 * @pre the VSI must have initialized mbuf_sz
 */
int
ice_cfg_vsi_for_rx(struct ice_vsi *vsi)
{
	int i, err;

	for (i = 0; i < vsi->num_rx_queues; i++) {
		MPASS(vsi->mbuf_sz > 0);
		err = ice_setup_rx_ctx(&vsi->rx_queues[i]);
		if (err)
			return err;
	}

	return (0);
}

/**
 * ice_is_rxq_ready - Check if an Rx queue is ready
 * @hw: ice hw structure
 * @pf_q: absolute PF queue index to check
 * @reg: on successful return, contains qrx_ctrl contents
 *
 * Reads the QRX_CTRL register and verifies if the queue is in a consistent
 * state. That is, QENA_REQ matches QENA_STAT. Used to check before making
 * a request to change the queue, as well as to verify the request has
 * finished. The queue should change status within a few microseconds, so we
 * use a small delay while polling the register.
 *
 * Returns an error code if the queue does not update after a few retries.
 */
static int
ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg)
{
	u32 qrx_ctrl, qena_req, qena_stat;
	int i;

	for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
		qrx_ctrl = rd32(hw, QRX_CTRL(pf_q));
		qena_req = (qrx_ctrl >> QRX_CTRL_QENA_REQ_S) & 1;
		qena_stat = (qrx_ctrl >> QRX_CTRL_QENA_STAT_S) & 1;

		/* if the request and status bits equal, then the queue is
		 * fully disabled or enabled.
		 */
		if (qena_req == qena_stat) {
			*reg = qrx_ctrl;
			return (0);
		}

		/* wait a few microseconds before we check again */
		DELAY(10);
	}

	return (ETIMEDOUT);
}

/**
 * ice_control_rx_queues - Configure hardware to start or stop the Rx queues
 * @vsi: VSI to enable/disable queues
 * @enable: true to enable queues, false to disable
 *
 * Control the Rx queues through the QRX_CTRL register, enabling or disabling
 * them. Wait for the appropriate time to ensure that the queues have actually
 * reached the expected state.
 */
int
ice_control_rx_queues(struct ice_vsi *vsi, bool enable)
{
	struct ice_hw *hw = &vsi->sc->hw;
	device_t dev = vsi->sc->dev;
	u32 qrx_ctrl = 0;
	int i, err;

	/* TODO: amortize waits by changing all queues up front and then
	 * checking their status afterwards. This will become more necessary
	 * when we have a large number of queues.
	 */
	for (i = 0; i < vsi->num_rx_queues; i++) {
		struct ice_rx_queue *rxq = &vsi->rx_queues[i];
		int pf_q = vsi->rx_qmap[rxq->me];

		err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl);
		if (err) {
			device_printf(dev,
			    "Rx queue %d is not ready\n",
			    pf_q);
			return err;
		}

		/* Skip if the queue is already in correct state */
		if (enable == !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M))
			continue;

		if (enable)
			qrx_ctrl |= QRX_CTRL_QENA_REQ_M;
		else
			qrx_ctrl &= ~QRX_CTRL_QENA_REQ_M;
		wr32(hw, QRX_CTRL(pf_q), qrx_ctrl);

		/* wait for the queue to finalize the request */
		err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl);
		if (err) {
			device_printf(dev,
			    "Rx queue %d %sable timeout\n",
			    pf_q, (enable ? "en" : "dis"));
			return err;
		}

		/* this should never happen */
		if (enable != !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) {
			device_printf(dev,
			    "Rx queue %d invalid state\n",
			    pf_q);
			return (EDOOFUS);
		}
	}

	return (0);
}

/**
 * ice_add_mac_to_list - Add MAC filter to a MAC filter list
 * @vsi: the VSI to forward to
 * @list: list which contains MAC filter entries
 * @addr: the MAC address to be added
 * @action: filter action to perform on match
 *
 * Adds a MAC address filter to the list which will be forwarded to firmware
 * to add a series of MAC address filters.
 *
 * Returns 0 on success, and an error code on failure.
1649 * 1650 */ 1651 static int 1652 ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 1653 const u8 *addr, enum ice_sw_fwd_act_type action) 1654 { 1655 struct ice_fltr_list_entry *entry; 1656 1657 entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); 1658 if (!entry) 1659 return (ENOMEM); 1660 1661 entry->fltr_info.flag = ICE_FLTR_TX; 1662 entry->fltr_info.src_id = ICE_SRC_ID_VSI; 1663 entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC; 1664 entry->fltr_info.fltr_act = action; 1665 entry->fltr_info.vsi_handle = vsi->idx; 1666 bcopy(addr, entry->fltr_info.l_data.mac.mac_addr, ETHER_ADDR_LEN); 1667 1668 LIST_ADD(&entry->list_entry, list); 1669 1670 return 0; 1671 } 1672 1673 /** 1674 * ice_free_fltr_list - Free memory associated with a MAC address list 1675 * @list: the list to free 1676 * 1677 * Free the memory of each entry associated with the list. 1678 */ 1679 static void 1680 ice_free_fltr_list(struct ice_list_head *list) 1681 { 1682 struct ice_fltr_list_entry *e, *tmp; 1683 1684 LIST_FOR_EACH_ENTRY_SAFE(e, tmp, list, ice_fltr_list_entry, list_entry) { 1685 LIST_DEL(&e->list_entry); 1686 free(e, M_ICE); 1687 } 1688 } 1689 1690 /** 1691 * ice_add_vsi_mac_filter - Add a MAC address filter for a VSI 1692 * @vsi: the VSI to add the filter for 1693 * @addr: MAC address to add a filter for 1694 * 1695 * Add a MAC address filter for a given VSI. This is a wrapper around 1696 * ice_add_mac to simplify the interface. First, it only accepts a single 1697 * address, so we don't have to mess around with the list setup in other 1698 * functions. Second, it ignores the ICE_ERR_ALREADY_EXIST error, so that 1699 * callers don't need to worry about attempting to add the same filter twice. 1700 */ 1701 int 1702 ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) 1703 { 1704 struct ice_list_head mac_addr_list; 1705 struct ice_hw *hw = &vsi->sc->hw; 1706 device_t dev = vsi->sc->dev; 1707 enum ice_status status; 1708 int err = 0; 1709 1710 INIT_LIST_HEAD(&mac_addr_list); 1711 1712 err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); 1713 if (err) 1714 goto free_mac_list; 1715 1716 status = ice_add_mac(hw, &mac_addr_list); 1717 if (status == ICE_ERR_ALREADY_EXISTS) { 1718 ; /* Don't complain if we try to add a filter that already exists */ 1719 } else if (status) { 1720 device_printf(dev, 1721 "Failed to add a filter for MAC %6D, err %s aq_err %s\n", 1722 addr, ":", 1723 ice_status_str(status), 1724 ice_aq_str(hw->adminq.sq_last_status)); 1725 err = (EIO); 1726 } 1727 1728 free_mac_list: 1729 ice_free_fltr_list(&mac_addr_list); 1730 return err; 1731 } 1732 1733 /** 1734 * ice_cfg_pf_default_mac_filters - Setup default unicast and broadcast addrs 1735 * @sc: device softc structure 1736 * 1737 * Program the default unicast and broadcast filters for the PF VSI. 1738 */ 1739 int 1740 ice_cfg_pf_default_mac_filters(struct ice_softc *sc) 1741 { 1742 struct ice_vsi *vsi = &sc->pf_vsi; 1743 struct ice_hw *hw = &sc->hw; 1744 int err; 1745 1746 /* Add the LAN MAC address */ 1747 err = ice_add_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); 1748 if (err) 1749 return err; 1750 1751 /* Add the broadcast address */ 1752 err = ice_add_vsi_mac_filter(vsi, broadcastaddr); 1753 if (err) 1754 return err; 1755 1756 return (0); 1757 } 1758 1759 /** 1760 * ice_remove_vsi_mac_filter - Remove a MAC address filter for a VSI 1761 * @vsi: the VSI to add the filter for 1762 * @addr: MAC address to remove a filter for 1763 * 1764 * Remove a MAC address filter from a given VSI. 
 * ice_remove_mac to simplify the interface. First, it only accepts a single
 * address, so we don't have to mess around with the list setup in other
 * functions. Second, it ignores the ICE_ERR_DOES_NOT_EXIST error, so that
 * callers don't need to worry about attempting to remove filters which
 * haven't yet been added.
 */
int
ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
{
	struct ice_list_head mac_addr_list;
	struct ice_hw *hw = &vsi->sc->hw;
	device_t dev = vsi->sc->dev;
	enum ice_status status;
	int err = 0;

	INIT_LIST_HEAD(&mac_addr_list);

	err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI);
	if (err)
		goto free_mac_list;

	status = ice_remove_mac(hw, &mac_addr_list);
	if (status == ICE_ERR_DOES_NOT_EXIST) {
		; /* Don't complain if we try to remove a filter that doesn't exist */
	} else if (status) {
		device_printf(dev,
		    "Failed to remove a filter for MAC %6D, err %s aq_err %s\n",
		    addr, ":",
		    ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		err = (EIO);
	}

free_mac_list:
	ice_free_fltr_list(&mac_addr_list);
	return err;
}

/**
 * ice_rm_pf_default_mac_filters - Remove default unicast and broadcast addrs
 * @sc: device softc structure
 *
 * Remove the default unicast and broadcast filters from the PF VSI.
 */
int
ice_rm_pf_default_mac_filters(struct ice_softc *sc)
{
	struct ice_vsi *vsi = &sc->pf_vsi;
	struct ice_hw *hw = &sc->hw;
	int err;

	/* Remove the LAN MAC address */
	err = ice_remove_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr);
	if (err)
		return err;

	/* Remove the broadcast address */
	err = ice_remove_vsi_mac_filter(vsi, broadcastaddr);
	if (err)
		return (EIO);

	return (0);
}

/**
 * ice_check_ctrlq_errors - Check for and report controlq errors
 * @sc: device private structure
 * @qname: name of the controlq
 * @cq: the controlq to check
 *
 * Check and report controlq errors. Currently all we do is report them to the
 * kernel message log, but we might want to improve this in the future, such
 * as to keep track of statistics.
 */
static void
ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname,
    struct ice_ctl_q_info *cq)
{
	struct ice_hw *hw = &sc->hw;
	u32 val;

	/* Check for error indications. Note that all the controlqs use the
	 * same register layout, so we use the PF_FW_AxQLEN defines only.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			device_printf(sc->dev,
			    "%s Receive Queue VF Error detected\n", qname);
		if (val & PF_FW_ARQLEN_ARQOVFL_M)
			device_printf(sc->dev,
			    "%s Receive Queue Overflow Error detected\n",
			    qname);
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			device_printf(sc->dev,
			    "%s Receive Queue Critical Error detected\n",
			    qname);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			device_printf(sc->dev,
			    "%s Send Queue VF Error detected\n", qname);
		if (val & PF_FW_ATQLEN_ATQOVFL_M)
			device_printf(sc->dev,
			    "%s Send Queue Overflow Error detected\n",
			    qname);
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			device_printf(sc->dev,
			    "%s Send Queue Critical Error detected\n",
			    qname);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		wr32(hw, cq->sq.len, val);
	}
}

/**
 * ice_process_link_event - Process a link event indication from firmware
 * @sc: device softc structure
 * @e: the received event data
 *
 * Gets the current link status from hardware, and may print a message if an
 * unqualified module is detected.
 */
static void
ice_process_link_event(struct ice_softc *sc,
		       struct ice_rq_event_info __invariant_only *e)
{
	struct ice_port_info *pi = sc->hw.port_info;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum ice_status status;

	/* Sanity check that the data length matches */
	MPASS(le16toh(e->desc.datalen) == sizeof(struct ice_aqc_get_link_status_data));

	/*
	 * Even though the adapter gets link status information inside the
	 * event, it needs to send a Get Link Status AQ command in order
	 * to re-enable link events.
	 */
	pi->phy.get_link_info = true;
	ice_get_link_status(pi, &sc->link_up);

	if (pi->phy.link_info.topo_media_conflict &
	    (ICE_AQ_LINK_TOPO_CONFLICT | ICE_AQ_LINK_MEDIA_CONFLICT |
	     ICE_AQ_LINK_TOPO_CORRUPT))
		device_printf(dev,
		    "Possible mis-configuration of the Ethernet port detected; please use the Intel (R) Ethernet Port Configuration Tool utility to address the issue.\n");

	if ((pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
	    !(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE))
		device_printf(dev,
		    "Link is disabled on this device because an unsupported module type was detected!
Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); 1927 1928 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { 1929 if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) { 1930 status = ice_aq_set_link_restart_an(pi, false, NULL); 1931 if (status != ICE_SUCCESS) 1932 device_printf(dev, 1933 "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", 1934 __func__, ice_status_str(status), 1935 ice_aq_str(hw->adminq.sq_last_status)); 1936 } 1937 } 1938 /* ICE_STATE_NO_MEDIA is cleared when polling task detects media */ 1939 1940 /* Indicate that link status must be reported again */ 1941 ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED); 1942 1943 /* OS link info is updated elsewhere */ 1944 } 1945 1946 /** 1947 * ice_process_ctrlq_event - Respond to a controlq event 1948 * @sc: device private structure 1949 * @qname: the name for this controlq 1950 * @event: the event to process 1951 * 1952 * Perform actions in response to various controlq event notifications. 1953 */ 1954 static void 1955 ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, 1956 struct ice_rq_event_info *event) 1957 { 1958 u16 opcode; 1959 1960 opcode = le16toh(event->desc.opcode); 1961 1962 switch (opcode) { 1963 case ice_aqc_opc_get_link_status: 1964 ice_process_link_event(sc, event); 1965 break; 1966 case ice_mbx_opc_send_msg_to_pf: 1967 /* TODO: handle IOV event */ 1968 break; 1969 case ice_aqc_opc_lldp_set_mib_change: 1970 ice_handle_mib_change_event(sc, event); 1971 break; 1972 case ice_aqc_opc_event_lan_overflow: 1973 ice_handle_lan_overflow_event(sc, event); 1974 break; 1975 default: 1976 device_printf(sc->dev, 1977 "%s Receive Queue unhandled event 0x%04x ignored\n", 1978 qname, opcode); 1979 } 1980 } 1981 1982 /** 1983 * ice_process_ctrlq - helper function to process controlq rings 1984 * @sc: device private structure 1985 * @q_type: specific control queue type 1986 * @pending: return parameter to track remaining events 1987 * 1988 * Process controlq events for a given control queue type. Returns zero on 1989 * success, and an error code on failure. If successful, pending is the number 1990 * of remaining events left in the queue. 1991 */ 1992 int 1993 ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending) 1994 { 1995 struct ice_rq_event_info event = { { 0 } }; 1996 struct ice_hw *hw = &sc->hw; 1997 struct ice_ctl_q_info *cq; 1998 enum ice_status status; 1999 const char *qname; 2000 int loop = 0; 2001 2002 switch (q_type) { 2003 case ICE_CTL_Q_ADMIN: 2004 cq = &hw->adminq; 2005 qname = "Admin"; 2006 break; 2007 case ICE_CTL_Q_MAILBOX: 2008 cq = &hw->mailboxq; 2009 qname = "Mailbox"; 2010 break; 2011 default: 2012 device_printf(sc->dev, 2013 "Unknown control queue type 0x%x\n", 2014 q_type); 2015 return 0; 2016 } 2017 2018 ice_check_ctrlq_errors(sc, qname, cq); 2019 2020 /* 2021 * Control queue processing happens during the admin task which may be 2022 * holding a non-sleepable lock, so we *must* use M_NOWAIT here. 
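	 * Because M_NOWAIT allocations can fail under memory pressure, the
	 * ENOMEM path below is expected and callers must tolerate it.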
	 */
	event.buf_len = cq->rq_buf_size;
	event.msg_buf = (u8 *)malloc(event.buf_len, M_ICE, M_ZERO | M_NOWAIT);
	if (!event.msg_buf) {
		device_printf(sc->dev,
		    "Unable to allocate memory for %s Receive Queue event\n",
		    qname);
		return (ENOMEM);
	}

	do {
		status = ice_clean_rq_elem(hw, cq, &event, pending);
		if (status == ICE_ERR_AQ_NO_WORK)
			break;
		if (status) {
			if (q_type == ICE_CTL_Q_ADMIN)
				device_printf(sc->dev,
				    "%s Receive Queue event error %s aq_err %s\n",
				    qname, ice_status_str(status),
				    ice_aq_str(cq->rq_last_status));
			else
				device_printf(sc->dev,
				    "%s Receive Queue event error %s cq_err %d\n",
				    qname, ice_status_str(status), cq->rq_last_status);
			free(event.msg_buf, M_ICE);
			return (EIO);
		}
		/* XXX should we separate this handler by controlq type? */
		ice_process_ctrlq_event(sc, qname, &event);
	} while (*pending && (++loop < ICE_CTRLQ_WORK_LIMIT));

	free(event.msg_buf, M_ICE);

	return 0;
}

/**
 * pkg_ver_empty - Check if a package version is empty
 * @pkg_ver: the package version to check
 * @pkg_name: the package name to check
 *
 * Checks if the package version structure is empty. We consider a package
 * version as empty if none of the versions are non-zero and the name string
 * is null as well.
 *
 * This is used to check if the package version was initialized by the driver,
 * as we do not expect an actual DDP package file to have a zero'd version and
 * name.
 *
 * @returns true if the package version is empty, or false otherwise.
 */
static bool
pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name)
{
	return (pkg_name[0] == '\0' &&
		pkg_ver->major == 0 &&
		pkg_ver->minor == 0 &&
		pkg_ver->update == 0 &&
		pkg_ver->draft == 0);
}

/**
 * pkg_ver_compatible - Check if the package version is compatible
 * @pkg_ver: the package version to check
 *
 * Compares the package version number to the driver's expected major/minor
 * version. Returns an integer indicating whether the version is older, newer,
 * or compatible with the driver.
 *
 * @returns 0 if the package version is compatible, -1 if the package version
 * is older, and 1 if the package version is newer than the driver version.
 */
static int
pkg_ver_compatible(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ)
		return (1); /* newer */
	else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
		 (pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
		return (1); /* newer */
	else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
		 (pkg_ver->minor == ICE_PKG_SUPP_VER_MNR))
		return (0); /* compatible */
	else
		return (-1); /* older */
}

/**
 * ice_os_pkg_version_str - Format OS package version info into a sbuf
 * @hw: device hw structure
 * @buf: string buffer to store name/version string
 *
 * Formats the name and version of the OS DDP package as found in the ice_ddp
 * module into a string.
 *
 * @remark This will almost always be the same as the active package, but
 * could be different in some cases. Use ice_active_pkg_version_str to get the
 * version of the active DDP package.
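 *
 * @remark Illustrative sketch only (not a required calling convention): the
 * output can be captured with sbuf(9) in the same way ice_print_nvm_version()
 * does for the NVM version string, for example:
 *
 *	struct sbuf *s = sbuf_new_auto();
 *
 *	ice_os_pkg_version_str(hw, s);
 *	sbuf_finish(s);
 *	device_printf(dev, "DDP package: %s\n", sbuf_data(s));
 *	sbuf_delete(s);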
2121 */ 2122 static void 2123 ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) 2124 { 2125 char name_buf[ICE_PKG_NAME_SIZE]; 2126 2127 /* If the OS DDP package info is empty, use "None" */ 2128 if (pkg_ver_empty(&hw->pkg_ver, hw->pkg_name)) { 2129 sbuf_printf(buf, "None"); 2130 return; 2131 } 2132 2133 /* 2134 * This should already be null-terminated, but since this is a raw 2135 * value from an external source, strlcpy() into a new buffer to 2136 * make sure. 2137 */ 2138 bzero(name_buf, sizeof(name_buf)); 2139 strlcpy(name_buf, (char *)hw->pkg_name, ICE_PKG_NAME_SIZE); 2140 2141 sbuf_printf(buf, "%s version %u.%u.%u.%u", 2142 name_buf, 2143 hw->pkg_ver.major, 2144 hw->pkg_ver.minor, 2145 hw->pkg_ver.update, 2146 hw->pkg_ver.draft); 2147 } 2148 2149 /** 2150 * ice_active_pkg_version_str - Format active package version info into a sbuf 2151 * @hw: device hw structure 2152 * @buf: string buffer to store name/version string 2153 * 2154 * Formats the name and version of the active DDP package info into a string 2155 * buffer for use. 2156 */ 2157 static void 2158 ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) 2159 { 2160 char name_buf[ICE_PKG_NAME_SIZE]; 2161 2162 /* If the active DDP package info is empty, use "None" */ 2163 if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { 2164 sbuf_printf(buf, "None"); 2165 return; 2166 } 2167 2168 /* 2169 * This should already be null-terminated, but since this is a raw 2170 * value from an external source, strlcpy() into a new buffer to 2171 * make sure. 2172 */ 2173 bzero(name_buf, sizeof(name_buf)); 2174 strlcpy(name_buf, (char *)hw->active_pkg_name, ICE_PKG_NAME_SIZE); 2175 2176 sbuf_printf(buf, "%s version %u.%u.%u.%u", 2177 name_buf, 2178 hw->active_pkg_ver.major, 2179 hw->active_pkg_ver.minor, 2180 hw->active_pkg_ver.update, 2181 hw->active_pkg_ver.draft); 2182 2183 if (hw->active_track_id != 0) 2184 sbuf_printf(buf, ", track id 0x%08x", hw->active_track_id); 2185 } 2186 2187 /** 2188 * ice_nvm_version_str - Format the NVM version information into a sbuf 2189 * @hw: device hw structure 2190 * @buf: string buffer to store version string 2191 * 2192 * Formats the NVM information including firmware version, API version, NVM 2193 * version, the EETRACK id, and OEM specific version information into a string 2194 * buffer. 2195 */ 2196 static void 2197 ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf) 2198 { 2199 struct ice_nvm_info *nvm = &hw->nvm; 2200 struct ice_orom_info *orom = &nvm->orom; 2201 struct ice_netlist_ver_info *netlist_ver = &hw->netlist_ver; 2202 2203 /* Note that the netlist versions are stored in packed Binary Coded 2204 * Decimal format. The use of '%x' will correctly display these as 2205 * decimal numbers. This works because every 4 bits will be displayed 2206 * as a hexadecimal digit, and the BCD format will only use the values 2207 * 0-9. 
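	 *
	 * For example, a BCD-encoded minor version byte of 0x25 represents
	 * decimal 25, and "%x" prints it as "25", which is the intended
	 * human-readable value.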
2208 */ 2209 sbuf_printf(buf, 2210 "fw %u.%u.%u api %u.%u nvm %x.%02x etid %08x netlist %x.%x.%x-%x.%x.%x.%04x oem %u.%u.%u", 2211 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, 2212 hw->api_maj_ver, hw->api_min_ver, 2213 nvm->major_ver, nvm->minor_ver, nvm->eetrack, 2214 netlist_ver->major, netlist_ver->minor, 2215 netlist_ver->type >> 16, netlist_ver->type & 0xFFFF, 2216 netlist_ver->rev, netlist_ver->cust_ver, netlist_ver->hash, 2217 orom->major, orom->build, orom->patch); 2218 } 2219 2220 /** 2221 * ice_print_nvm_version - Print the NVM info to the kernel message log 2222 * @sc: the device softc structure 2223 * 2224 * Format and print an NVM version string using ice_nvm_version_str(). 2225 */ 2226 void 2227 ice_print_nvm_version(struct ice_softc *sc) 2228 { 2229 struct ice_hw *hw = &sc->hw; 2230 device_t dev = sc->dev; 2231 struct sbuf *sbuf; 2232 2233 sbuf = sbuf_new_auto(); 2234 ice_nvm_version_str(hw, sbuf); 2235 sbuf_finish(sbuf); 2236 device_printf(dev, "%s\n", sbuf_data(sbuf)); 2237 sbuf_delete(sbuf); 2238 } 2239 2240 /** 2241 * ice_update_vsi_hw_stats - Update VSI-specific ethernet statistics counters 2242 * @vsi: the VSI to be updated 2243 * 2244 * Reads hardware stats and updates the ice_vsi_hw_stats tracking structure with 2245 * the updated values. 2246 */ 2247 void 2248 ice_update_vsi_hw_stats(struct ice_vsi *vsi) 2249 { 2250 struct ice_eth_stats *prev_es, *cur_es; 2251 struct ice_hw *hw = &vsi->sc->hw; 2252 u16 vsi_num; 2253 2254 if (!ice_is_vsi_valid(hw, vsi->idx)) 2255 return; 2256 2257 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); /* HW absolute index of a VSI */ 2258 prev_es = &vsi->hw_stats.prev; 2259 cur_es = &vsi->hw_stats.cur; 2260 2261 #define ICE_VSI_STAT40(name, location) \ 2262 ice_stat_update40(hw, name ## L(vsi_num), \ 2263 vsi->hw_stats.offsets_loaded, \ 2264 &prev_es->location, &cur_es->location) 2265 2266 #define ICE_VSI_STAT32(name, location) \ 2267 ice_stat_update32(hw, name(vsi_num), \ 2268 vsi->hw_stats.offsets_loaded, \ 2269 &prev_es->location, &cur_es->location) 2270 2271 ICE_VSI_STAT40(GLV_GORC, rx_bytes); 2272 ICE_VSI_STAT40(GLV_UPRC, rx_unicast); 2273 ICE_VSI_STAT40(GLV_MPRC, rx_multicast); 2274 ICE_VSI_STAT40(GLV_BPRC, rx_broadcast); 2275 ICE_VSI_STAT32(GLV_RDPC, rx_discards); 2276 ICE_VSI_STAT40(GLV_GOTC, tx_bytes); 2277 ICE_VSI_STAT40(GLV_UPTC, tx_unicast); 2278 ICE_VSI_STAT40(GLV_MPTC, tx_multicast); 2279 ICE_VSI_STAT40(GLV_BPTC, tx_broadcast); 2280 ICE_VSI_STAT32(GLV_TEPC, tx_errors); 2281 2282 ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded, 2283 cur_es); 2284 2285 #undef ICE_VSI_STAT40 2286 #undef ICE_VSI_STAT32 2287 2288 vsi->hw_stats.offsets_loaded = true; 2289 } 2290 2291 /** 2292 * ice_reset_vsi_stats - Reset VSI statistics counters 2293 * @vsi: VSI structure 2294 * 2295 * Resets the software tracking counters for the VSI statistics, and indicate 2296 * that the offsets haven't been loaded. This is intended to be called 2297 * post-reset so that VSI statistics count from zero again. 2298 */ 2299 void 2300 ice_reset_vsi_stats(struct ice_vsi *vsi) 2301 { 2302 /* Reset HW stats */ 2303 memset(&vsi->hw_stats.prev, 0, sizeof(vsi->hw_stats.prev)); 2304 memset(&vsi->hw_stats.cur, 0, sizeof(vsi->hw_stats.cur)); 2305 vsi->hw_stats.offsets_loaded = false; 2306 } 2307 2308 /** 2309 * ice_update_pf_stats - Update port stats counters 2310 * @sc: device private softc structure 2311 * 2312 * Reads hardware statistics registers and updates the software tracking 2313 * structure with new values. 
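 *
 * Each counter is read through the ICE_PF_STAT40()/ICE_PF_STAT32() helper
 * macros defined inside the function; for instance,
 * ICE_PF_STAT40(GLPRT_GORC, eth.rx_bytes) expands to essentially:
 *
 *	ice_stat_update40(hw, GLPRT_GORCL(lport),
 *			  sc->stats.offsets_loaded,
 *			  &prev_ps->eth.rx_bytes, &cur_ps->eth.rx_bytes);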
2314 */ 2315 void 2316 ice_update_pf_stats(struct ice_softc *sc) 2317 { 2318 struct ice_hw_port_stats *prev_ps, *cur_ps; 2319 struct ice_hw *hw = &sc->hw; 2320 u8 lport; 2321 2322 MPASS(hw->port_info); 2323 2324 prev_ps = &sc->stats.prev; 2325 cur_ps = &sc->stats.cur; 2326 lport = hw->port_info->lport; 2327 2328 #define ICE_PF_STAT40(name, location) \ 2329 ice_stat_update40(hw, name ## L(lport), \ 2330 sc->stats.offsets_loaded, \ 2331 &prev_ps->location, &cur_ps->location) 2332 2333 #define ICE_PF_STAT32(name, location) \ 2334 ice_stat_update32(hw, name(lport), \ 2335 sc->stats.offsets_loaded, \ 2336 &prev_ps->location, &cur_ps->location) 2337 2338 ICE_PF_STAT40(GLPRT_GORC, eth.rx_bytes); 2339 ICE_PF_STAT40(GLPRT_UPRC, eth.rx_unicast); 2340 ICE_PF_STAT40(GLPRT_MPRC, eth.rx_multicast); 2341 ICE_PF_STAT40(GLPRT_BPRC, eth.rx_broadcast); 2342 ICE_PF_STAT40(GLPRT_GOTC, eth.tx_bytes); 2343 ICE_PF_STAT40(GLPRT_UPTC, eth.tx_unicast); 2344 ICE_PF_STAT40(GLPRT_MPTC, eth.tx_multicast); 2345 ICE_PF_STAT40(GLPRT_BPTC, eth.tx_broadcast); 2346 2347 ICE_PF_STAT32(GLPRT_TDOLD, tx_dropped_link_down); 2348 ICE_PF_STAT40(GLPRT_PRC64, rx_size_64); 2349 ICE_PF_STAT40(GLPRT_PRC127, rx_size_127); 2350 ICE_PF_STAT40(GLPRT_PRC255, rx_size_255); 2351 ICE_PF_STAT40(GLPRT_PRC511, rx_size_511); 2352 ICE_PF_STAT40(GLPRT_PRC1023, rx_size_1023); 2353 ICE_PF_STAT40(GLPRT_PRC1522, rx_size_1522); 2354 ICE_PF_STAT40(GLPRT_PRC9522, rx_size_big); 2355 ICE_PF_STAT40(GLPRT_PTC64, tx_size_64); 2356 ICE_PF_STAT40(GLPRT_PTC127, tx_size_127); 2357 ICE_PF_STAT40(GLPRT_PTC255, tx_size_255); 2358 ICE_PF_STAT40(GLPRT_PTC511, tx_size_511); 2359 ICE_PF_STAT40(GLPRT_PTC1023, tx_size_1023); 2360 ICE_PF_STAT40(GLPRT_PTC1522, tx_size_1522); 2361 ICE_PF_STAT40(GLPRT_PTC9522, tx_size_big); 2362 2363 ICE_PF_STAT32(GLPRT_LXONRXC, link_xon_rx); 2364 ICE_PF_STAT32(GLPRT_LXOFFRXC, link_xoff_rx); 2365 ICE_PF_STAT32(GLPRT_LXONTXC, link_xon_tx); 2366 ICE_PF_STAT32(GLPRT_LXOFFTXC, link_xoff_tx); 2367 ICE_PF_STAT32(GLPRT_CRCERRS, crc_errors); 2368 ICE_PF_STAT32(GLPRT_ILLERRC, illegal_bytes); 2369 ICE_PF_STAT32(GLPRT_MLFC, mac_local_faults); 2370 ICE_PF_STAT32(GLPRT_MRFC, mac_remote_faults); 2371 ICE_PF_STAT32(GLPRT_RLEC, rx_len_errors); 2372 ICE_PF_STAT32(GLPRT_RUC, rx_undersize); 2373 ICE_PF_STAT32(GLPRT_RFC, rx_fragments); 2374 ICE_PF_STAT32(GLPRT_ROC, rx_oversize); 2375 ICE_PF_STAT32(GLPRT_RJC, rx_jabber); 2376 2377 #undef ICE_PF_STAT40 2378 #undef ICE_PF_STAT32 2379 2380 sc->stats.offsets_loaded = true; 2381 } 2382 2383 /** 2384 * ice_reset_pf_stats - Reset port stats counters 2385 * @sc: Device private softc structure 2386 * 2387 * Reset software tracking values for statistics to zero, and indicate that 2388 * offsets haven't been loaded. Intended to be called after a device reset so 2389 * that statistics count from zero again. 2390 */ 2391 void 2392 ice_reset_pf_stats(struct ice_softc *sc) 2393 { 2394 memset(&sc->stats.prev, 0, sizeof(sc->stats.prev)); 2395 memset(&sc->stats.cur, 0, sizeof(sc->stats.cur)); 2396 sc->stats.offsets_loaded = false; 2397 } 2398 2399 /** 2400 * ice_sysctl_show_fw - sysctl callback to show firmware information 2401 * @oidp: sysctl oid structure 2402 * @arg1: pointer to private data structure 2403 * @arg2: unused 2404 * @req: sysctl request pointer 2405 * 2406 * Callback for the fw_version sysctl, to display the current firmware 2407 * information found at hardware init time. 
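 *
 * As a hypothetical example (the exact sysctl node path depends on how the
 * oid is attached elsewhere in the driver), the value would typically be
 * read with something like:
 *
 *	sysctl dev.ice.0.fw_version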
2408 */ 2409 static int 2410 ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS) 2411 { 2412 struct ice_softc *sc = (struct ice_softc *)arg1; 2413 struct ice_hw *hw = &sc->hw; 2414 struct sbuf *sbuf; 2415 2416 UNREFERENCED_PARAMETER(oidp); 2417 UNREFERENCED_PARAMETER(arg2); 2418 2419 if (ice_driver_is_detaching(sc)) 2420 return (ESHUTDOWN); 2421 2422 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2423 ice_nvm_version_str(hw, sbuf); 2424 sbuf_finish(sbuf); 2425 sbuf_delete(sbuf); 2426 2427 return (0); 2428 } 2429 2430 /** 2431 * ice_sysctl_pba_number - sysctl callback to show PBA number 2432 * @oidp: sysctl oid structure 2433 * @arg1: pointer to private data structure 2434 * @arg2: unused 2435 * @req: sysctl request pointer 2436 * 2437 * Callback for the pba_number sysctl, used to read the Product Board Assembly 2438 * number for this device. 2439 */ 2440 static int 2441 ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS) 2442 { 2443 struct ice_softc *sc = (struct ice_softc *)arg1; 2444 struct ice_hw *hw = &sc->hw; 2445 device_t dev = sc->dev; 2446 u8 pba_string[32] = ""; 2447 enum ice_status status; 2448 2449 UNREFERENCED_PARAMETER(arg2); 2450 2451 if (ice_driver_is_detaching(sc)) 2452 return (ESHUTDOWN); 2453 2454 status = ice_read_pba_string(hw, pba_string, sizeof(pba_string)); 2455 if (status) { 2456 device_printf(dev, 2457 "%s: failed to read PBA string from NVM; status %s, aq_err %s\n", 2458 __func__, ice_status_str(status), 2459 ice_aq_str(hw->adminq.sq_last_status)); 2460 return (EIO); 2461 } 2462 2463 return sysctl_handle_string(oidp, pba_string, sizeof(pba_string), req); 2464 } 2465 2466 /** 2467 * ice_sysctl_pkg_version - sysctl to show the active package version info 2468 * @oidp: sysctl oid structure 2469 * @arg1: pointer to private data structure 2470 * @arg2: unused 2471 * @req: sysctl request pointer 2472 * 2473 * Callback for the pkg_version sysctl, to display the active DDP package name 2474 * and version information. 2475 */ 2476 static int 2477 ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS) 2478 { 2479 struct ice_softc *sc = (struct ice_softc *)arg1; 2480 struct ice_hw *hw = &sc->hw; 2481 struct sbuf *sbuf; 2482 2483 UNREFERENCED_PARAMETER(oidp); 2484 UNREFERENCED_PARAMETER(arg2); 2485 2486 if (ice_driver_is_detaching(sc)) 2487 return (ESHUTDOWN); 2488 2489 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2490 ice_active_pkg_version_str(hw, sbuf); 2491 sbuf_finish(sbuf); 2492 sbuf_delete(sbuf); 2493 2494 return (0); 2495 } 2496 2497 /** 2498 * ice_sysctl_os_pkg_version - sysctl to show the OS package version info 2499 * @oidp: sysctl oid structure 2500 * @arg1: pointer to private data structure 2501 * @arg2: unused 2502 * @req: sysctl request pointer 2503 * 2504 * Callback for the pkg_version sysctl, to display the OS DDP package name and 2505 * version info found in the ice_ddp module. 
2506 */ 2507 static int 2508 ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS) 2509 { 2510 struct ice_softc *sc = (struct ice_softc *)arg1; 2511 struct ice_hw *hw = &sc->hw; 2512 struct sbuf *sbuf; 2513 2514 UNREFERENCED_PARAMETER(oidp); 2515 UNREFERENCED_PARAMETER(arg2); 2516 2517 if (ice_driver_is_detaching(sc)) 2518 return (ESHUTDOWN); 2519 2520 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 2521 ice_os_pkg_version_str(hw, sbuf); 2522 sbuf_finish(sbuf); 2523 sbuf_delete(sbuf); 2524 2525 return (0); 2526 } 2527 2528 /** 2529 * ice_sysctl_current_speed - sysctl callback to show current link speed 2530 * @oidp: sysctl oid structure 2531 * @arg1: pointer to private data structure 2532 * @arg2: unused 2533 * @req: sysctl request pointer 2534 * 2535 * Callback for the current_speed sysctl, to display the string representing 2536 * the current link speed. 2537 */ 2538 static int 2539 ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS) 2540 { 2541 struct ice_softc *sc = (struct ice_softc *)arg1; 2542 struct ice_hw *hw = &sc->hw; 2543 struct sbuf *sbuf; 2544 2545 UNREFERENCED_PARAMETER(oidp); 2546 UNREFERENCED_PARAMETER(arg2); 2547 2548 if (ice_driver_is_detaching(sc)) 2549 return (ESHUTDOWN); 2550 2551 sbuf = sbuf_new_for_sysctl(NULL, NULL, 10, req); 2552 sbuf_printf(sbuf, "%s", ice_aq_speed_to_str(hw->port_info)); 2553 sbuf_finish(sbuf); 2554 sbuf_delete(sbuf); 2555 2556 return (0); 2557 } 2558 2559 /** 2560 * @var phy_link_speeds 2561 * @brief PHY link speed conversion array 2562 * 2563 * Array of link speeds to convert ICE_PHY_TYPE_LOW and ICE_PHY_TYPE_HIGH into 2564 * link speeds used by the link speed sysctls. 2565 * 2566 * @remark these are based on the indices used in the BIT() macros for the 2567 * ICE_PHY_TYPE_LOW_* and ICE_PHY_TYPE_HIGH_* definitions. 
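 *
 * For example, if a PHY type is defined as BIT(20) in the
 * ICE_PHY_TYPE_LOW_* namespace, then phy_link_speeds[20] below must be
 * ICE_AQ_LINK_SPEED_25GB for the conversion to report the correct speed.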
2568 */ 2569 static const uint16_t phy_link_speeds[] = { 2570 ICE_AQ_LINK_SPEED_100MB, 2571 ICE_AQ_LINK_SPEED_100MB, 2572 ICE_AQ_LINK_SPEED_1000MB, 2573 ICE_AQ_LINK_SPEED_1000MB, 2574 ICE_AQ_LINK_SPEED_1000MB, 2575 ICE_AQ_LINK_SPEED_1000MB, 2576 ICE_AQ_LINK_SPEED_1000MB, 2577 ICE_AQ_LINK_SPEED_2500MB, 2578 ICE_AQ_LINK_SPEED_2500MB, 2579 ICE_AQ_LINK_SPEED_2500MB, 2580 ICE_AQ_LINK_SPEED_5GB, 2581 ICE_AQ_LINK_SPEED_5GB, 2582 ICE_AQ_LINK_SPEED_10GB, 2583 ICE_AQ_LINK_SPEED_10GB, 2584 ICE_AQ_LINK_SPEED_10GB, 2585 ICE_AQ_LINK_SPEED_10GB, 2586 ICE_AQ_LINK_SPEED_10GB, 2587 ICE_AQ_LINK_SPEED_10GB, 2588 ICE_AQ_LINK_SPEED_10GB, 2589 ICE_AQ_LINK_SPEED_25GB, 2590 ICE_AQ_LINK_SPEED_25GB, 2591 ICE_AQ_LINK_SPEED_25GB, 2592 ICE_AQ_LINK_SPEED_25GB, 2593 ICE_AQ_LINK_SPEED_25GB, 2594 ICE_AQ_LINK_SPEED_25GB, 2595 ICE_AQ_LINK_SPEED_25GB, 2596 ICE_AQ_LINK_SPEED_25GB, 2597 ICE_AQ_LINK_SPEED_25GB, 2598 ICE_AQ_LINK_SPEED_25GB, 2599 ICE_AQ_LINK_SPEED_25GB, 2600 ICE_AQ_LINK_SPEED_40GB, 2601 ICE_AQ_LINK_SPEED_40GB, 2602 ICE_AQ_LINK_SPEED_40GB, 2603 ICE_AQ_LINK_SPEED_40GB, 2604 ICE_AQ_LINK_SPEED_40GB, 2605 ICE_AQ_LINK_SPEED_40GB, 2606 ICE_AQ_LINK_SPEED_50GB, 2607 ICE_AQ_LINK_SPEED_50GB, 2608 ICE_AQ_LINK_SPEED_50GB, 2609 ICE_AQ_LINK_SPEED_50GB, 2610 ICE_AQ_LINK_SPEED_50GB, 2611 ICE_AQ_LINK_SPEED_50GB, 2612 ICE_AQ_LINK_SPEED_50GB, 2613 ICE_AQ_LINK_SPEED_50GB, 2614 ICE_AQ_LINK_SPEED_50GB, 2615 ICE_AQ_LINK_SPEED_50GB, 2616 ICE_AQ_LINK_SPEED_50GB, 2617 ICE_AQ_LINK_SPEED_50GB, 2618 ICE_AQ_LINK_SPEED_50GB, 2619 ICE_AQ_LINK_SPEED_50GB, 2620 ICE_AQ_LINK_SPEED_50GB, 2621 ICE_AQ_LINK_SPEED_100GB, 2622 ICE_AQ_LINK_SPEED_100GB, 2623 ICE_AQ_LINK_SPEED_100GB, 2624 ICE_AQ_LINK_SPEED_100GB, 2625 ICE_AQ_LINK_SPEED_100GB, 2626 ICE_AQ_LINK_SPEED_100GB, 2627 ICE_AQ_LINK_SPEED_100GB, 2628 ICE_AQ_LINK_SPEED_100GB, 2629 ICE_AQ_LINK_SPEED_100GB, 2630 ICE_AQ_LINK_SPEED_100GB, 2631 ICE_AQ_LINK_SPEED_100GB, 2632 ICE_AQ_LINK_SPEED_100GB, 2633 ICE_AQ_LINK_SPEED_100GB, 2634 /* These rates are for ICE_PHY_TYPE_HIGH_* */ 2635 ICE_AQ_LINK_SPEED_100GB, 2636 ICE_AQ_LINK_SPEED_100GB, 2637 ICE_AQ_LINK_SPEED_100GB, 2638 ICE_AQ_LINK_SPEED_100GB, 2639 ICE_AQ_LINK_SPEED_100GB 2640 }; 2641 2642 #define ICE_SYSCTL_HELP_ADVERTISE_SPEED \ 2643 "\nControl advertised link speed." \ 2644 "\nFlags:" \ 2645 "\n\t 0x0 - Auto" \ 2646 "\n\t 0x1 - 10 Mb" \ 2647 "\n\t 0x2 - 100 Mb" \ 2648 "\n\t 0x4 - 1G" \ 2649 "\n\t 0x8 - 2.5G" \ 2650 "\n\t 0x10 - 5G" \ 2651 "\n\t 0x20 - 10G" \ 2652 "\n\t 0x40 - 20G" \ 2653 "\n\t 0x80 - 25G" \ 2654 "\n\t 0x100 - 40G" \ 2655 "\n\t 0x200 - 50G" \ 2656 "\n\t 0x400 - 100G" \ 2657 "\n\t0x8000 - Unknown" \ 2658 "\n\t" \ 2659 "\nUse \"sysctl -x\" to view flags properly." 
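
/*
 * Example (illustrative only): to advertise just 10G and 25G, the flag
 * values listed in ICE_SYSCTL_HELP_ADVERTISE_SPEED above are OR'd together,
 * e.g. 0x20 | 0x80 = 0xa0, and the combined value is written to the
 * advertise-speed sysctl handled by ice_sysctl_advertise_speed() below.
 */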
2660 2661 #define ICE_PHYS_100MB \ 2662 (ICE_PHY_TYPE_LOW_100BASE_TX | \ 2663 ICE_PHY_TYPE_LOW_100M_SGMII) 2664 #define ICE_PHYS_1000MB \ 2665 (ICE_PHY_TYPE_LOW_1000BASE_T | \ 2666 ICE_PHY_TYPE_LOW_1000BASE_SX | \ 2667 ICE_PHY_TYPE_LOW_1000BASE_LX | \ 2668 ICE_PHY_TYPE_LOW_1000BASE_KX | \ 2669 ICE_PHY_TYPE_LOW_1G_SGMII) 2670 #define ICE_PHYS_2500MB \ 2671 (ICE_PHY_TYPE_LOW_2500BASE_T | \ 2672 ICE_PHY_TYPE_LOW_2500BASE_X | \ 2673 ICE_PHY_TYPE_LOW_2500BASE_KX) 2674 #define ICE_PHYS_5GB \ 2675 (ICE_PHY_TYPE_LOW_5GBASE_T | \ 2676 ICE_PHY_TYPE_LOW_5GBASE_KR) 2677 #define ICE_PHYS_10GB \ 2678 (ICE_PHY_TYPE_LOW_10GBASE_T | \ 2679 ICE_PHY_TYPE_LOW_10G_SFI_DA | \ 2680 ICE_PHY_TYPE_LOW_10GBASE_SR | \ 2681 ICE_PHY_TYPE_LOW_10GBASE_LR | \ 2682 ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \ 2683 ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \ 2684 ICE_PHY_TYPE_LOW_10G_SFI_C2C) 2685 #define ICE_PHYS_25GB \ 2686 (ICE_PHY_TYPE_LOW_25GBASE_T | \ 2687 ICE_PHY_TYPE_LOW_25GBASE_CR | \ 2688 ICE_PHY_TYPE_LOW_25GBASE_CR_S | \ 2689 ICE_PHY_TYPE_LOW_25GBASE_CR1 | \ 2690 ICE_PHY_TYPE_LOW_25GBASE_SR | \ 2691 ICE_PHY_TYPE_LOW_25GBASE_LR | \ 2692 ICE_PHY_TYPE_LOW_25GBASE_KR | \ 2693 ICE_PHY_TYPE_LOW_25GBASE_KR_S | \ 2694 ICE_PHY_TYPE_LOW_25GBASE_KR1 | \ 2695 ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | \ 2696 ICE_PHY_TYPE_LOW_25G_AUI_C2C) 2697 #define ICE_PHYS_40GB \ 2698 (ICE_PHY_TYPE_LOW_40GBASE_CR4 | \ 2699 ICE_PHY_TYPE_LOW_40GBASE_SR4 | \ 2700 ICE_PHY_TYPE_LOW_40GBASE_LR4 | \ 2701 ICE_PHY_TYPE_LOW_40GBASE_KR4 | \ 2702 ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \ 2703 ICE_PHY_TYPE_LOW_40G_XLAUI) 2704 #define ICE_PHYS_50GB \ 2705 (ICE_PHY_TYPE_LOW_50GBASE_CR2 | \ 2706 ICE_PHY_TYPE_LOW_50GBASE_SR2 | \ 2707 ICE_PHY_TYPE_LOW_50GBASE_LR2 | \ 2708 ICE_PHY_TYPE_LOW_50GBASE_KR2 | \ 2709 ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \ 2710 ICE_PHY_TYPE_LOW_50G_LAUI2 | \ 2711 ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \ 2712 ICE_PHY_TYPE_LOW_50G_AUI2 | \ 2713 ICE_PHY_TYPE_LOW_50GBASE_CP | \ 2714 ICE_PHY_TYPE_LOW_50GBASE_SR | \ 2715 ICE_PHY_TYPE_LOW_50GBASE_FR | \ 2716 ICE_PHY_TYPE_LOW_50GBASE_LR | \ 2717 ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 | \ 2718 ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \ 2719 ICE_PHY_TYPE_LOW_50G_AUI1) 2720 #define ICE_PHYS_100GB_LOW \ 2721 (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \ 2722 ICE_PHY_TYPE_LOW_100GBASE_SR4 | \ 2723 ICE_PHY_TYPE_LOW_100GBASE_LR4 | \ 2724 ICE_PHY_TYPE_LOW_100GBASE_KR4 | \ 2725 ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \ 2726 ICE_PHY_TYPE_LOW_100G_CAUI4 | \ 2727 ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \ 2728 ICE_PHY_TYPE_LOW_100G_AUI4 | \ 2729 ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \ 2730 ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \ 2731 ICE_PHY_TYPE_LOW_100GBASE_CP2 | \ 2732 ICE_PHY_TYPE_LOW_100GBASE_SR2 | \ 2733 ICE_PHY_TYPE_LOW_100GBASE_DR) 2734 #define ICE_PHYS_100GB_HIGH \ 2735 (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \ 2736 ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \ 2737 ICE_PHY_TYPE_HIGH_100G_CAUI2 | \ 2738 ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ 2739 ICE_PHY_TYPE_HIGH_100G_AUI2) 2740 2741 /** 2742 * ice_aq_phy_types_to_sysctl_speeds - Convert the PHY Types to speeds 2743 * @phy_type_low: lower 64-bit PHY Type bitmask 2744 * @phy_type_high: upper 64-bit PHY Type bitmask 2745 * 2746 * Convert the PHY Type fields from Get PHY Abilities and Set PHY Config into 2747 * link speed flags. If phy_type_high has an unknown PHY type, then the return 2748 * value will include the "ICE_AQ_LINK_SPEED_UNKNOWN" flag as well. 
2749 */ 2750 static u16 2751 ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, u64 phy_type_high) 2752 { 2753 u16 sysctl_speeds = 0; 2754 int bit; 2755 2756 /* coverity[address_of] */ 2757 for_each_set_bit(bit, &phy_type_low, 64) 2758 sysctl_speeds |= phy_link_speeds[bit]; 2759 2760 /* coverity[address_of] */ 2761 for_each_set_bit(bit, &phy_type_high, 64) { 2762 if ((bit + 64) < (int)ARRAY_SIZE(phy_link_speeds)) 2763 sysctl_speeds |= phy_link_speeds[bit + 64]; 2764 else 2765 sysctl_speeds |= ICE_AQ_LINK_SPEED_UNKNOWN; 2766 } 2767 2768 return (sysctl_speeds); 2769 } 2770 2771 /** 2772 * ice_sysctl_speeds_to_aq_phy_types - Convert sysctl speed flags to AQ PHY flags 2773 * @sysctl_speeds: 16-bit sysctl speeds or AQ_LINK_SPEED flags 2774 * @phy_type_low: output parameter for lower AQ PHY flags 2775 * @phy_type_high: output parameter for higher AQ PHY flags 2776 * 2777 * Converts the given link speed flags into AQ PHY type flag sets appropriate 2778 * for use in a Set PHY Config command. 2779 */ 2780 static void 2781 ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, 2782 u64 *phy_type_high) 2783 { 2784 *phy_type_low = 0, *phy_type_high = 0; 2785 2786 if (sysctl_speeds & ICE_AQ_LINK_SPEED_100MB) 2787 *phy_type_low |= ICE_PHYS_100MB; 2788 if (sysctl_speeds & ICE_AQ_LINK_SPEED_1000MB) 2789 *phy_type_low |= ICE_PHYS_1000MB; 2790 if (sysctl_speeds & ICE_AQ_LINK_SPEED_2500MB) 2791 *phy_type_low |= ICE_PHYS_2500MB; 2792 if (sysctl_speeds & ICE_AQ_LINK_SPEED_5GB) 2793 *phy_type_low |= ICE_PHYS_5GB; 2794 if (sysctl_speeds & ICE_AQ_LINK_SPEED_10GB) 2795 *phy_type_low |= ICE_PHYS_10GB; 2796 if (sysctl_speeds & ICE_AQ_LINK_SPEED_25GB) 2797 *phy_type_low |= ICE_PHYS_25GB; 2798 if (sysctl_speeds & ICE_AQ_LINK_SPEED_40GB) 2799 *phy_type_low |= ICE_PHYS_40GB; 2800 if (sysctl_speeds & ICE_AQ_LINK_SPEED_50GB) 2801 *phy_type_low |= ICE_PHYS_50GB; 2802 if (sysctl_speeds & ICE_AQ_LINK_SPEED_100GB) { 2803 *phy_type_low |= ICE_PHYS_100GB_LOW; 2804 *phy_type_high |= ICE_PHYS_100GB_HIGH; 2805 } 2806 } 2807 2808 /** 2809 * ice_intersect_media_types_with_caps - Restrict input AQ PHY flags 2810 * @sc: driver private structure 2811 * @phy_type_low: input/output flag set for low PHY types 2812 * @phy_type_high: input/output flag set for high PHY types 2813 * 2814 * Intersects the input PHY flags with PHY flags retrieved from the adapter to 2815 * ensure the flags are compatible. 2816 * 2817 * @returns 0 on success, EIO if an AQ command fails, or EINVAL if input PHY 2818 * types have no intersection with TOPO_CAPS and the adapter is in non-lenient 2819 * mode 2820 */ 2821 static int 2822 ice_intersect_media_types_with_caps(struct ice_softc *sc, u64 *phy_type_low, 2823 u64 *phy_type_high) 2824 { 2825 device_t dev = sc->dev; 2826 enum ice_status status; 2827 2828 u64 new_phy_low, new_phy_high; 2829 2830 status = ice_get_phy_types(sc, &new_phy_low, &new_phy_high); 2831 if (status != ICE_SUCCESS) { 2832 /* Function already prints appropriate error message */ 2833 return (EIO); 2834 } 2835 2836 ice_apply_supported_speed_filter(&new_phy_low, &new_phy_high); 2837 2838 new_phy_low &= *phy_type_low; 2839 new_phy_high &= *phy_type_high; 2840 2841 if (new_phy_low == 0 && new_phy_high == 0) { 2842 device_printf(dev, 2843 "The selected speed is not supported by the current media. 
Please select a link speed that is supported by the current media.\n"); 2844 return (EINVAL); 2845 } 2846 2847 /* Overwrite input phy_type values and return */ 2848 *phy_type_low = new_phy_low; 2849 *phy_type_high = new_phy_high; 2850 2851 return (0); 2852 } 2853 2854 /** 2855 * ice_get_auto_speeds - Get PHY type flags for "auto" speed 2856 * @sc: driver private structure 2857 * @phy_type_low: output low PHY type flags 2858 * @phy_type_high: output high PHY type flags 2859 * 2860 * Retrieves a suitable set of PHY type flags to use for an "auto" speed 2861 * setting by either using the NVM default overrides for speed, or retrieving 2862 * a default from the adapter using Get PHY capabilities in TOPO_CAPS mode. 2863 * 2864 * @returns 0 on success or EIO on AQ command failure 2865 */ 2866 static int 2867 ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low, 2868 u64 *phy_type_high) 2869 { 2870 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 2871 struct ice_hw *hw = &sc->hw; 2872 struct ice_port_info *pi = hw->port_info; 2873 device_t dev = sc->dev; 2874 enum ice_status status; 2875 2876 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE)) { 2877 /* copy over speed settings from LDO TLV */ 2878 *phy_type_low = CPU_TO_LE64(sc->ldo_tlv.phy_type_low); 2879 *phy_type_high = CPU_TO_LE64(sc->ldo_tlv.phy_type_high); 2880 } else { 2881 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 2882 &pcaps, NULL); 2883 if (status != ICE_SUCCESS) { 2884 device_printf(dev, 2885 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 2886 __func__, ice_status_str(status), 2887 ice_aq_str(hw->adminq.sq_last_status)); 2888 return (EIO); 2889 } 2890 2891 *phy_type_low = le64toh(pcaps.phy_type_low); 2892 *phy_type_high = le64toh(pcaps.phy_type_high); 2893 } 2894 2895 return (0); 2896 } 2897 2898 /** 2899 * ice_sysctl_advertise_speed - Display/change link speeds supported by port 2900 * @oidp: sysctl oid structure 2901 * @arg1: pointer to private data structure 2902 * @arg2: unused 2903 * @req: sysctl request pointer 2904 * 2905 * On read: Displays the currently supported speeds 2906 * On write: Sets the device's supported speeds 2907 * Valid input flags: see ICE_SYSCTL_HELP_ADVERTISE_SPEED 2908 */ 2909 static int 2910 ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS) 2911 { 2912 struct ice_softc *sc = (struct ice_softc *)arg1; 2913 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 2914 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 2915 struct ice_hw *hw = &sc->hw; 2916 struct ice_port_info *pi = hw->port_info; 2917 device_t dev = sc->dev; 2918 enum ice_status status; 2919 u64 phy_low, phy_high; 2920 u16 sysctl_speeds = 0; 2921 int error = 0; 2922 2923 UNREFERENCED_PARAMETER(arg2); 2924 2925 if (ice_driver_is_detaching(sc)) 2926 return (ESHUTDOWN); 2927 2928 /* Get the current speeds from the adapter's "active" configuration. 
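	 * ICE_AQC_REPORT_SW_CFG reports the last software configuration,
	 * which is what this sysctl displays on read.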
*/ 2929 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 2930 &pcaps, NULL); 2931 if (status != ICE_SUCCESS) { 2932 device_printf(dev, 2933 "%s: ice_aq_get_phy_caps (SW_CFG) failed; status %s, aq_err %s\n", 2934 __func__, ice_status_str(status), 2935 ice_aq_str(hw->adminq.sq_last_status)); 2936 return (EIO); 2937 } 2938 2939 phy_low = le64toh(pcaps.phy_type_low); 2940 phy_high = le64toh(pcaps.phy_type_high); 2941 sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); 2942 2943 error = sysctl_handle_16(oidp, &sysctl_speeds, 0, req); 2944 if ((error) || (req->newptr == NULL)) 2945 return (error); 2946 2947 if (sysctl_speeds > 0x7FF) { 2948 device_printf(dev, 2949 "%s: \"%u\" is outside of the range of acceptable values.\n", 2950 __func__, sysctl_speeds); 2951 return (EINVAL); 2952 } 2953 2954 /* 0 is treated as "Auto"; the driver will handle selecting the correct speeds, 2955 * or apply an override if one is specified in the NVM. 2956 */ 2957 if (sysctl_speeds == 0) { 2958 error = ice_get_auto_speeds(sc, &phy_low, &phy_high); 2959 if (error) 2960 /* Function already prints appropriate error message */ 2961 return (error); 2962 } else { 2963 ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &phy_low, &phy_high); 2964 error = ice_intersect_media_types_with_caps(sc, &phy_low, &phy_high); 2965 if (error) 2966 /* Function already prints appropriate error message */ 2967 return (error); 2968 } 2969 sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); 2970 2971 /* Cache new user setting for speeds */ 2972 pi->phy.curr_user_speed_req = sysctl_speeds; 2973 2974 /* Setup new PHY config with new input PHY types */ 2975 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 2976 2977 cfg.phy_type_low = phy_low; 2978 cfg.phy_type_high = phy_high; 2979 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2980 2981 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 2982 if (status != ICE_SUCCESS) { 2983 /* Don't indicate failure if there's no media in the port -- the sysctl 2984 * handler has saved the value and will apply it when media is inserted. 2985 */ 2986 if (status == ICE_ERR_AQ_ERROR && 2987 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 2988 device_printf(dev, 2989 "%s: Setting will be applied when media is inserted\n", __func__); 2990 return (0); 2991 } else { 2992 device_printf(dev, 2993 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 2994 __func__, ice_status_str(status), 2995 ice_aq_str(hw->adminq.sq_last_status)); 2996 return (EIO); 2997 } 2998 } 2999 3000 return (0); 3001 } 3002 3003 #define ICE_SYSCTL_HELP_FEC_CONFIG \ 3004 "\nDisplay or set the port's requested FEC mode." \ 3005 "\n\tauto - " ICE_FEC_STRING_AUTO \ 3006 "\n\tfc - " ICE_FEC_STRING_BASER \ 3007 "\n\trs - " ICE_FEC_STRING_RS \ 3008 "\n\tnone - " ICE_FEC_STRING_NONE \ 3009 "\nEither of the left or right strings above can be used to set the requested mode." 3010 3011 /** 3012 * ice_sysctl_fec_config - Display/change the configured FEC mode 3013 * @oidp: sysctl oid structure 3014 * @arg1: pointer to private data structure 3015 * @arg2: unused 3016 * @req: sysctl request pointer 3017 * 3018 * On read: Displays the configured FEC mode 3019 * On write: Sets the device's FEC mode to the input string, if it's valid. 
3020 * Valid input strings: see ICE_SYSCTL_HELP_FEC_CONFIG 3021 */ 3022 static int 3023 ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS) 3024 { 3025 struct ice_softc *sc = (struct ice_softc *)arg1; 3026 struct ice_port_info *pi = sc->hw.port_info; 3027 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3028 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3029 struct ice_hw *hw = &sc->hw; 3030 enum ice_fec_mode new_mode; 3031 enum ice_status status; 3032 device_t dev = sc->dev; 3033 char req_fec[32]; 3034 int error = 0; 3035 3036 UNREFERENCED_PARAMETER(arg2); 3037 3038 if (ice_driver_is_detaching(sc)) 3039 return (ESHUTDOWN); 3040 3041 bzero(req_fec, sizeof(req_fec)); 3042 strlcpy(req_fec, ice_requested_fec_mode(pi), sizeof(req_fec)); 3043 3044 error = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req); 3045 if ((error) || (req->newptr == NULL)) 3046 return (error); 3047 3048 if (strcmp(req_fec, "auto") == 0 || 3049 strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) { 3050 new_mode = ICE_FEC_AUTO; 3051 } else if (strcmp(req_fec, "fc") == 0 || 3052 strcmp(req_fec, ice_fec_str(ICE_FEC_BASER)) == 0) { 3053 new_mode = ICE_FEC_BASER; 3054 } else if (strcmp(req_fec, "rs") == 0 || 3055 strcmp(req_fec, ice_fec_str(ICE_FEC_RS)) == 0) { 3056 new_mode = ICE_FEC_RS; 3057 } else if (strcmp(req_fec, "none") == 0 || 3058 strcmp(req_fec, ice_fec_str(ICE_FEC_NONE)) == 0) { 3059 new_mode = ICE_FEC_NONE; 3060 } else { 3061 device_printf(dev, 3062 "%s: \"%s\" is not a valid FEC mode\n", 3063 __func__, req_fec); 3064 return (EINVAL); 3065 } 3066 3067 /* Cache user FEC mode for later link ups */ 3068 pi->phy.curr_user_fec_req = new_mode; 3069 3070 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 3071 &pcaps, NULL); 3072 if (status != ICE_SUCCESS) { 3073 device_printf(dev, 3074 "%s: ice_aq_get_phy_caps failed (SW_CFG); status %s, aq_err %s\n", 3075 __func__, ice_status_str(status), 3076 ice_aq_str(hw->adminq.sq_last_status)); 3077 return (EIO); 3078 } 3079 3080 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 3081 3082 /* Get link_fec_opt/AUTO_FEC mode from TOPO caps for base for new FEC mode */ 3083 memset(&pcaps, 0, sizeof(pcaps)); 3084 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 3085 &pcaps, NULL); 3086 if (status != ICE_SUCCESS) { 3087 device_printf(dev, 3088 "%s: ice_aq_get_phy_caps failed (TOPO_CAP); status %s, aq_err %s\n", 3089 __func__, ice_status_str(status), 3090 ice_aq_str(hw->adminq.sq_last_status)); 3091 return (EIO); 3092 } 3093 3094 /* Configure new FEC options using TOPO caps */ 3095 cfg.link_fec_opt = pcaps.link_fec_options; 3096 cfg.caps &= ~ICE_AQ_PHY_ENA_AUTO_FEC; 3097 if (pcaps.caps & ICE_AQC_PHY_EN_AUTO_FEC) 3098 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_FEC; 3099 3100 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE) && 3101 new_mode == ICE_FEC_AUTO) { 3102 /* copy over FEC settings from LDO TLV */ 3103 cfg.link_fec_opt = sc->ldo_tlv.fec_options; 3104 } else { 3105 ice_cfg_phy_fec(pi, &cfg, new_mode); 3106 3107 /* Check if the new mode is valid, and exit with an error if not */ 3108 if (cfg.link_fec_opt && 3109 !(cfg.link_fec_opt & pcaps.link_fec_options)) { 3110 device_printf(dev, 3111 "%s: The requested FEC mode, %s, is not supported by current media\n", 3112 __func__, ice_fec_str(new_mode)); 3113 return (ENOTSUP); 3114 } 3115 } 3116 3117 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3118 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3119 if (status != ICE_SUCCESS) { 3120 /* Don't indicate failure if there's no media in the port -- the sysctl 3121 * handler has 
saved the value and will apply it when media is inserted. 3122 */ 3123 if (status == ICE_ERR_AQ_ERROR && 3124 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 3125 device_printf(dev, 3126 "%s: Setting will be applied when media is inserted\n", __func__); 3127 return (0); 3128 } else { 3129 device_printf(dev, 3130 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 3131 __func__, ice_status_str(status), 3132 ice_aq_str(hw->adminq.sq_last_status)); 3133 return (EIO); 3134 } 3135 } 3136 3137 return (0); 3138 } 3139 3140 /** 3141 * ice_sysctl_negotiated_fec - Display the negotiated FEC mode on the link 3142 * @oidp: sysctl oid structure 3143 * @arg1: pointer to private data structure 3144 * @arg2: unused 3145 * @req: sysctl request pointer 3146 * 3147 * On read: Displays the negotiated FEC mode, in a string 3148 */ 3149 static int 3150 ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS) 3151 { 3152 struct ice_softc *sc = (struct ice_softc *)arg1; 3153 struct ice_hw *hw = &sc->hw; 3154 char neg_fec[32]; 3155 int error; 3156 3157 UNREFERENCED_PARAMETER(arg2); 3158 3159 if (ice_driver_is_detaching(sc)) 3160 return (ESHUTDOWN); 3161 3162 /* Copy const string into a buffer to drop const qualifier */ 3163 bzero(neg_fec, sizeof(neg_fec)); 3164 strlcpy(neg_fec, ice_negotiated_fec_mode(hw->port_info), sizeof(neg_fec)); 3165 3166 error = sysctl_handle_string(oidp, neg_fec, 0, req); 3167 if (req->newptr != NULL) 3168 return (EPERM); 3169 3170 return (error); 3171 } 3172 3173 #define ICE_SYSCTL_HELP_FC_CONFIG \ 3174 "\nDisplay or set the port's advertised flow control mode.\n" \ 3175 "\t0 - " ICE_FC_STRING_NONE \ 3176 "\n\t1 - " ICE_FC_STRING_RX \ 3177 "\n\t2 - " ICE_FC_STRING_TX \ 3178 "\n\t3 - " ICE_FC_STRING_FULL \ 3179 "\nEither the numbers or the strings above can be used to set the advertised mode." 3180 3181 /** 3182 * ice_sysctl_fc_config - Display/change the advertised flow control mode 3183 * @oidp: sysctl oid structure 3184 * @arg1: pointer to private data structure 3185 * @arg2: unused 3186 * @req: sysctl request pointer 3187 * 3188 * On read: Displays the configured flow control mode 3189 * On write: Sets the device's flow control mode to the input, if it's valid. 
3190 * Valid input strings: see ICE_SYSCTL_HELP_FC_CONFIG 3191 */ 3192 static int 3193 ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS) 3194 { 3195 struct ice_softc *sc = (struct ice_softc *)arg1; 3196 struct ice_port_info *pi = sc->hw.port_info; 3197 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3198 enum ice_fc_mode old_mode, new_mode; 3199 struct ice_hw *hw = &sc->hw; 3200 device_t dev = sc->dev; 3201 enum ice_status status; 3202 int error = 0, fc_num; 3203 bool mode_set = false; 3204 struct sbuf buf; 3205 char *fc_str_end; 3206 char fc_str[32]; 3207 u8 aq_failures; 3208 3209 UNREFERENCED_PARAMETER(arg2); 3210 3211 if (ice_driver_is_detaching(sc)) 3212 return (ESHUTDOWN); 3213 3214 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 3215 &pcaps, NULL); 3216 if (status != ICE_SUCCESS) { 3217 device_printf(dev, 3218 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3219 __func__, ice_status_str(status), 3220 ice_aq_str(hw->adminq.sq_last_status)); 3221 return (EIO); 3222 } 3223 3224 /* Convert HW response format to SW enum value */ 3225 if ((pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && 3226 (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) 3227 old_mode = ICE_FC_FULL; 3228 else if (pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3229 old_mode = ICE_FC_TX_PAUSE; 3230 else if (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3231 old_mode = ICE_FC_RX_PAUSE; 3232 else 3233 old_mode = ICE_FC_NONE; 3234 3235 /* Create "old" string for output */ 3236 bzero(fc_str, sizeof(fc_str)); 3237 sbuf_new_for_sysctl(&buf, fc_str, sizeof(fc_str), req); 3238 sbuf_printf(&buf, "%d<%s>", old_mode, ice_fc_str(old_mode)); 3239 sbuf_finish(&buf); 3240 sbuf_delete(&buf); 3241 3242 error = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req); 3243 if ((error) || (req->newptr == NULL)) 3244 return (error); 3245 3246 /* Try to parse input as a string, first */ 3247 if (strcasecmp(ice_fc_str(ICE_FC_FULL), fc_str) == 0) { 3248 new_mode = ICE_FC_FULL; 3249 mode_set = true; 3250 } 3251 else if (strcasecmp(ice_fc_str(ICE_FC_TX_PAUSE), fc_str) == 0) { 3252 new_mode = ICE_FC_TX_PAUSE; 3253 mode_set = true; 3254 } 3255 else if (strcasecmp(ice_fc_str(ICE_FC_RX_PAUSE), fc_str) == 0) { 3256 new_mode = ICE_FC_RX_PAUSE; 3257 mode_set = true; 3258 } 3259 else if (strcasecmp(ice_fc_str(ICE_FC_NONE), fc_str) == 0) { 3260 new_mode = ICE_FC_NONE; 3261 mode_set = true; 3262 } 3263 3264 /* 3265 * Then check if it's an integer, for compatibility with the method 3266 * used in older drivers. 3267 */ 3268 if (!mode_set) { 3269 fc_num = strtol(fc_str, &fc_str_end, 0); 3270 if (fc_str_end == fc_str) 3271 fc_num = -1; 3272 switch (fc_num) { 3273 case 3: 3274 new_mode = ICE_FC_FULL; 3275 break; 3276 case 2: 3277 new_mode = ICE_FC_TX_PAUSE; 3278 break; 3279 case 1: 3280 new_mode = ICE_FC_RX_PAUSE; 3281 break; 3282 case 0: 3283 new_mode = ICE_FC_NONE; 3284 break; 3285 default: 3286 device_printf(dev, 3287 "%s: \"%s\" is not a valid flow control mode\n", 3288 __func__, fc_str); 3289 return (EINVAL); 3290 } 3291 } 3292 3293 /* Finally, set the flow control mode in FW */ 3294 hw->port_info->fc.req_mode = new_mode; 3295 status = ice_set_fc(pi, &aq_failures, true); 3296 if (status != ICE_SUCCESS) { 3297 /* Don't indicate failure if there's no media in the port -- the sysctl 3298 * handler has saved the value and will apply it when media is inserted. 
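		 * The check below treats ICE_AQ_RC_EBUSY from the Set
		 * operation as "no media yet", mirroring the handling in the
		 * advertise-speed and FEC sysctls above.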
3299 */ 3300 if (aq_failures == ICE_SET_FC_AQ_FAIL_SET && 3301 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 3302 device_printf(dev, 3303 "%s: Setting will be applied when media is inserted\n", __func__); 3304 return (0); 3305 } else { 3306 device_printf(dev, 3307 "%s: ice_set_fc AQ failure = %d\n", __func__, aq_failures); 3308 return (EIO); 3309 } 3310 } 3311 3312 return (0); 3313 } 3314 3315 /** 3316 * ice_sysctl_negotiated_fc - Display currently negotiated FC mode 3317 * @oidp: sysctl oid structure 3318 * @arg1: pointer to private data structure 3319 * @arg2: unused 3320 * @req: sysctl request pointer 3321 * 3322 * On read: Displays the currently negotiated flow control settings. 3323 * 3324 * If link is not established, this will report ICE_FC_NONE, as no flow 3325 * control is negotiated while link is down. 3326 */ 3327 static int 3328 ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS) 3329 { 3330 struct ice_softc *sc = (struct ice_softc *)arg1; 3331 struct ice_port_info *pi = sc->hw.port_info; 3332 const char *negotiated_fc; 3333 3334 UNREFERENCED_PARAMETER(arg2); 3335 3336 if (ice_driver_is_detaching(sc)) 3337 return (ESHUTDOWN); 3338 3339 negotiated_fc = ice_flowcontrol_mode(pi); 3340 3341 return sysctl_handle_string(oidp, __DECONST(char *, negotiated_fc), 0, req); 3342 } 3343 3344 /** 3345 * __ice_sysctl_phy_type_handler - Display/change supported PHY types/speeds 3346 * @oidp: sysctl oid structure 3347 * @arg1: pointer to private data structure 3348 * @arg2: unused 3349 * @req: sysctl request pointer 3350 * @is_phy_type_high: if true, handle the high PHY type instead of the low PHY type 3351 * 3352 * Private handler for phy_type_high and phy_type_low sysctls. 3353 */ 3354 static int 3355 __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high) 3356 { 3357 struct ice_softc *sc = (struct ice_softc *)arg1; 3358 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 3359 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3360 struct ice_hw *hw = &sc->hw; 3361 device_t dev = sc->dev; 3362 enum ice_status status; 3363 uint64_t types; 3364 int error = 0; 3365 3366 UNREFERENCED_PARAMETER(arg2); 3367 3368 if (ice_driver_is_detaching(sc)) 3369 return (ESHUTDOWN); 3370 3371 status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_SW_CFG, 3372 &pcaps, NULL); 3373 if (status != ICE_SUCCESS) { 3374 device_printf(dev, 3375 "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", 3376 __func__, ice_status_str(status), 3377 ice_aq_str(hw->adminq.sq_last_status)); 3378 return (EIO); 3379 } 3380 3381 if (is_phy_type_high) 3382 types = pcaps.phy_type_high; 3383 else 3384 types = pcaps.phy_type_low; 3385 3386 error = sysctl_handle_64(oidp, &types, sizeof(types), req); 3387 if ((error) || (req->newptr == NULL)) 3388 return (error); 3389 3390 ice_copy_phy_caps_to_cfg(hw->port_info, &pcaps, &cfg); 3391 3392 if (is_phy_type_high) 3393 cfg.phy_type_high = types & hw->port_info->phy.phy_type_high; 3394 else 3395 cfg.phy_type_low = types & hw->port_info->phy.phy_type_low; 3396 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3397 3398 status = ice_aq_set_phy_cfg(hw, hw->port_info, &cfg, NULL); 3399 if (status != ICE_SUCCESS) { 3400 device_printf(dev, 3401 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 3402 __func__, ice_status_str(status), 3403 ice_aq_str(hw->adminq.sq_last_status)); 3404 return (EIO); 3405 } 3406 3407 return (0); 3408 3409 } 3410 3411 /** 3412 * ice_sysctl_phy_type_low - Display/change supported lower PHY types/speeds 3413 * @oidp: sysctl oid structure 3414 * @arg1: pointer 
to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the currently supported lower PHY types
 * On write: Sets the device's supported low PHY types
 */
static int
ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS)
{
	return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, false);
}

/**
 * ice_sysctl_phy_type_high - Display/change supported higher PHY types/speeds
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays the currently supported higher PHY types
 * On write: Sets the device's supported high PHY types
 */
static int
ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS)
{
	return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, true);
}

/**
 * ice_sysctl_phy_caps - Display response from Get PHY abilities
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 * @report_mode: the mode to report
 *
 * On read: Display the response from Get PHY abilities with the given report
 * mode.
 */
static int
ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode)
{
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi = hw->port_info;
	device_t dev = sc->dev;
	enum ice_status status;
	int error;

	UNREFERENCED_PARAMETER(arg2);

	error = priv_check(curthread, PRIV_DRIVER);
	if (error)
		return (error);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	status = ice_aq_get_phy_caps(pi, true, report_mode, &pcaps, NULL);
	if (status != ICE_SUCCESS) {
		device_printf(dev,
		    "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	error = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req);
	if (req->newptr != NULL)
		return (EPERM);

	return (error);
}

/**
 * ice_sysctl_phy_sw_caps - Display response from Get PHY abilities
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Display the response from Get PHY abilities reporting the last
 * software configuration.
 */
static int
ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS)
{
	return ice_sysctl_phy_caps(oidp, arg1, arg2, req,
				   ICE_AQC_REPORT_SW_CFG);
}

/**
 * ice_sysctl_phy_nvm_caps - Display response from Get PHY abilities
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Display the response from Get PHY abilities reporting the NVM
 * configuration.
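 *
 * @remark The data is exported as an opaque struct ice_aqc_get_phy_caps_data;
 * reading it from userspace therefore requires a consumer that understands
 * the structure layout (for example, a debug tool fetching the raw bytes via
 * sysctlbyname(3)).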
 */
static int
ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS)
{
	return ice_sysctl_phy_caps(oidp, arg1, arg2, req,
				   ICE_AQC_REPORT_NVM_CAP);
}

/**
 * ice_sysctl_phy_topo_caps - Display response from Get PHY abilities
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Display the response from Get PHY abilities reporting the
 * topology configuration.
 */
static int
ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS)
{
	return ice_sysctl_phy_caps(oidp, arg1, arg2, req,
				   ICE_AQC_REPORT_TOPO_CAP);
}

/**
 * ice_sysctl_phy_link_status - Display response from Get Link Status
 * @oidp: sysctl oid structure
 * @arg1: pointer to private data structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Display the response from firmware for the Get Link Status
 * request.
 */
static int
ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_softc *sc = (struct ice_softc *)arg1;
	struct ice_hw *hw = &sc->hw;
	struct ice_port_info *pi = hw->port_info;
	struct ice_aqc_get_link_status *resp;
	struct ice_aq_desc desc;
	device_t dev = sc->dev;
	enum ice_status status;
	int error;

	UNREFERENCED_PARAMETER(arg2);

	/*
	 * Ensure that only contexts with driver privilege are allowed to
	 * access this information
	 */
	error = priv_check(curthread, PRIV_DRIVER);
	if (error)
		return (error);

	if (ice_driver_is_detaching(sc))
		return (ESHUTDOWN);

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	resp = &desc.params.get_link_status;
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), NULL);
	if (status != ICE_SUCCESS) {
		device_printf(dev,
		    "%s: ice_aq_send_cmd failed; status %s, aq_err %s\n",
		    __func__, ice_status_str(status),
		    ice_aq_str(hw->adminq.sq_last_status));
		return (EIO);
	}

	error = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req);
	if (req->newptr != NULL)
		return (EPERM);

	return (error);
}

/**
 * ice_sysctl_fw_cur_lldp_persist_status - Display current FW LLDP status
 * @oidp: sysctl oid structure
 * @arg1: pointer to private softc structure
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * On read: Displays current persistent LLDP status.
3605 */ 3606 static int 3607 ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS) 3608 { 3609 struct ice_softc *sc = (struct ice_softc *)arg1; 3610 struct ice_hw *hw = &sc->hw; 3611 device_t dev = sc->dev; 3612 enum ice_status status; 3613 struct sbuf *sbuf; 3614 u32 lldp_state; 3615 3616 UNREFERENCED_PARAMETER(arg2); 3617 UNREFERENCED_PARAMETER(oidp); 3618 3619 if (ice_driver_is_detaching(sc)) 3620 return (ESHUTDOWN); 3621 3622 status = ice_get_cur_lldp_persist_status(hw, &lldp_state); 3623 if (status) { 3624 device_printf(dev, 3625 "Could not acquire current LLDP persistence status, err %s aq_err %s\n", 3626 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3627 return (EIO); 3628 } 3629 3630 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3631 sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); 3632 sbuf_finish(sbuf); 3633 sbuf_delete(sbuf); 3634 3635 return (0); 3636 } 3637 3638 /** 3639 * ice_sysctl_fw_dflt_lldp_persist_status - Display default FW LLDP status 3640 * @oidp: sysctl oid structure 3641 * @arg1: pointer to private softc structure 3642 * @arg2: unused 3643 * @req: sysctl request pointer 3644 * 3645 * On read: Displays default persistent LLDP status. 3646 */ 3647 static int 3648 ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS) 3649 { 3650 struct ice_softc *sc = (struct ice_softc *)arg1; 3651 struct ice_hw *hw = &sc->hw; 3652 device_t dev = sc->dev; 3653 enum ice_status status; 3654 struct sbuf *sbuf; 3655 u32 lldp_state; 3656 3657 UNREFERENCED_PARAMETER(arg2); 3658 UNREFERENCED_PARAMETER(oidp); 3659 3660 if (ice_driver_is_detaching(sc)) 3661 return (ESHUTDOWN); 3662 3663 status = ice_get_dflt_lldp_persist_status(hw, &lldp_state); 3664 if (status) { 3665 device_printf(dev, 3666 "Could not acquire default LLDP persistence status, err %s aq_err %s\n", 3667 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3668 return (EIO); 3669 } 3670 3671 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 3672 sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); 3673 sbuf_finish(sbuf); 3674 sbuf_delete(sbuf); 3675 3676 return (0); 3677 } 3678 3679 #define ICE_SYSCTL_HELP_FW_LLDP_AGENT \ 3680 "\nDisplay or change FW LLDP agent state:" \ 3681 "\n\t0 - disabled" \ 3682 "\n\t1 - enabled" 3683 3684 /** 3685 * ice_sysctl_fw_lldp_agent - Display or change the FW LLDP agent status 3686 * @oidp: sysctl oid structure 3687 * @arg1: pointer to private softc structure 3688 * @arg2: unused 3689 * @req: sysctl request pointer 3690 * 3691 * On read: Displays whether the FW LLDP agent is running 3692 * On write: Persistently enables or disables the FW LLDP agent 3693 */ 3694 static int 3695 ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS) 3696 { 3697 struct ice_softc *sc = (struct ice_softc *)arg1; 3698 struct ice_hw *hw = &sc->hw; 3699 device_t dev = sc->dev; 3700 enum ice_status status; 3701 int error = 0; 3702 u32 old_state; 3703 u8 fw_lldp_enabled; 3704 bool retried_start_lldp = false; 3705 3706 UNREFERENCED_PARAMETER(arg2); 3707 3708 if (ice_driver_is_detaching(sc)) 3709 return (ESHUTDOWN); 3710 3711 status = ice_get_cur_lldp_persist_status(hw, &old_state); 3712 if (status) { 3713 device_printf(dev, 3714 "Could not acquire current LLDP persistence status, err %s aq_err %s\n", 3715 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 3716 return (EIO); 3717 } 3718 3719 if (old_state > ICE_LLDP_ADMINSTATUS_ENA_RXTX) { 3720 status = ice_get_dflt_lldp_persist_status(hw, &old_state); 3721 if (status) { 3722 device_printf(dev, 3723 "Could not 
acquire default LLDP persistence status, err %s aq_err %s\n", 3724 ice_status_str(status), 3725 ice_aq_str(hw->adminq.sq_last_status)); 3726 return (EIO); 3727 } 3728 } 3729 if (old_state == 0) 3730 fw_lldp_enabled = false; 3731 else 3732 fw_lldp_enabled = true; 3733 3734 error = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req); 3735 if ((error) || (req->newptr == NULL)) 3736 return (error); 3737 3738 if (old_state == 0 && fw_lldp_enabled == false) 3739 return (0); 3740 3741 if (old_state != 0 && fw_lldp_enabled == true) 3742 return (0); 3743 3744 if (fw_lldp_enabled == false) { 3745 status = ice_aq_stop_lldp(hw, true, true, NULL); 3746 /* EPERM is returned if the LLDP agent is already shutdown */ 3747 if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM) { 3748 device_printf(dev, 3749 "%s: ice_aq_stop_lldp failed; status %s, aq_err %s\n", 3750 __func__, ice_status_str(status), 3751 ice_aq_str(hw->adminq.sq_last_status)); 3752 return (EIO); 3753 } 3754 ice_aq_set_dcb_parameters(hw, true, NULL); 3755 hw->port_info->is_sw_lldp = true; 3756 ice_add_rx_lldp_filter(sc); 3757 } else { 3758 retry_start_lldp: 3759 status = ice_aq_start_lldp(hw, true, NULL); 3760 if (status) { 3761 switch (hw->adminq.sq_last_status) { 3762 /* EEXIST is returned if the LLDP agent is already started */ 3763 case ICE_AQ_RC_EEXIST: 3764 break; 3765 case ICE_AQ_RC_EAGAIN: 3766 /* Retry command after a 2 second wait */ 3767 if (retried_start_lldp == false) { 3768 retried_start_lldp = true; 3769 pause("slldp", ICE_START_LLDP_RETRY_WAIT); 3770 goto retry_start_lldp; 3771 } 3772 /* Fallthrough */ 3773 default: 3774 device_printf(dev, 3775 "%s: ice_aq_start_lldp failed; status %s, aq_err %s\n", 3776 __func__, ice_status_str(status), 3777 ice_aq_str(hw->adminq.sq_last_status)); 3778 return (EIO); 3779 } 3780 } 3781 hw->port_info->is_sw_lldp = false; 3782 ice_del_rx_lldp_filter(sc); 3783 } 3784 3785 return (error); 3786 } 3787 3788 /** 3789 * ice_add_device_sysctls - add device specific dynamic sysctls 3790 * @sc: device private structure 3791 * 3792 * Add per-device dynamic sysctls which show device configuration or enable 3793 * configuring device functionality. For tunable values which can be set prior 3794 * to load, see ice_add_device_tunables. 3795 * 3796 * This function depends on the sysctl layout setup by ice_add_device_tunables, 3797 * and likely should be called near the end of the attach process. 
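 *
 * The values added here hang off the device's sysctl tree, so they can be
 * inspected from userland with sysctl(8), e.g. (unit number assumed):
 *
 *      # sysctl dev.ice.0.fw_version
 *      # sysctl dev.ice.0.current_speed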
3798 */ 3799 void 3800 ice_add_device_sysctls(struct ice_softc *sc) 3801 { 3802 struct sysctl_oid *hw_node; 3803 device_t dev = sc->dev; 3804 3805 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 3806 struct sysctl_oid_list *ctx_list = 3807 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 3808 3809 SYSCTL_ADD_PROC(ctx, ctx_list, 3810 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, 3811 sc, 0, ice_sysctl_show_fw, "A", "Firmware version"); 3812 3813 SYSCTL_ADD_PROC(ctx, ctx_list, 3814 OID_AUTO, "pba_number", CTLTYPE_STRING | CTLFLAG_RD, 3815 sc, 0, ice_sysctl_pba_number, "A", "Product Board Assembly Number"); 3816 3817 SYSCTL_ADD_PROC(ctx, ctx_list, 3818 OID_AUTO, "ddp_version", CTLTYPE_STRING | CTLFLAG_RD, 3819 sc, 0, ice_sysctl_pkg_version, "A", "Active DDP package name and version"); 3820 3821 SYSCTL_ADD_PROC(ctx, ctx_list, 3822 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, 3823 sc, 0, ice_sysctl_current_speed, "A", "Current Port Link Speed"); 3824 3825 SYSCTL_ADD_PROC(ctx, ctx_list, 3826 OID_AUTO, "requested_fec", CTLTYPE_STRING | CTLFLAG_RW, 3827 sc, 0, ice_sysctl_fec_config, "A", ICE_SYSCTL_HELP_FEC_CONFIG); 3828 3829 SYSCTL_ADD_PROC(ctx, ctx_list, 3830 OID_AUTO, "negotiated_fec", CTLTYPE_STRING | CTLFLAG_RD, 3831 sc, 0, ice_sysctl_negotiated_fec, "A", "Current Negotiated FEC mode"); 3832 3833 SYSCTL_ADD_PROC(ctx, ctx_list, 3834 OID_AUTO, "fc", CTLTYPE_STRING | CTLFLAG_RW, 3835 sc, 0, ice_sysctl_fc_config, "A", ICE_SYSCTL_HELP_FC_CONFIG); 3836 3837 SYSCTL_ADD_PROC(ctx, ctx_list, 3838 OID_AUTO, "advertise_speed", CTLTYPE_U16 | CTLFLAG_RW, 3839 sc, 0, ice_sysctl_advertise_speed, "SU", ICE_SYSCTL_HELP_ADVERTISE_SPEED); 3840 3841 SYSCTL_ADD_PROC(ctx, ctx_list, 3842 OID_AUTO, "fw_lldp_agent", CTLTYPE_U8 | CTLFLAG_RWTUN, 3843 sc, 0, ice_sysctl_fw_lldp_agent, "CU", ICE_SYSCTL_HELP_FW_LLDP_AGENT); 3844 3845 /* Differentiate software and hardware statistics, by keeping hw stats 3846 * in their own node. This isn't in ice_add_device_tunables, because 3847 * we won't have any CTLFLAG_TUN sysctls under this node. 3848 */ 3849 hw_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "hw", CTLFLAG_RD, 3850 NULL, "Port Hardware Statistics"); 3851 3852 ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur); 3853 3854 /* Add the main PF VSI stats now. Other VSIs will add their own stats 3855 * during creation 3856 */ 3857 ice_add_vsi_sysctls(&sc->pf_vsi); 3858 3859 /* Add sysctls related to debugging the device driver. This includes 3860 * sysctls which display additional internal driver state for use in 3861 * understanding what is happening within the driver. 3862 */ 3863 ice_add_debug_sysctls(sc); 3864 } 3865 3866 /** 3867 * @enum hmc_error_type 3868 * @brief enumeration of HMC errors 3869 * 3870 * Enumeration defining the possible HMC errors that might occur. 3871 */ 3872 enum hmc_error_type { 3873 HMC_ERR_PMF_INVALID = 0, 3874 HMC_ERR_VF_IDX_INVALID = 1, 3875 HMC_ERR_VF_PARENT_PF_INVALID = 2, 3876 /* 3 is reserved */ 3877 HMC_ERR_INDEX_TOO_BIG = 4, 3878 HMC_ERR_ADDRESS_TOO_LARGE = 5, 3879 HMC_ERR_SEGMENT_DESC_INVALID = 6, 3880 HMC_ERR_SEGMENT_DESC_TOO_SMALL = 7, 3881 HMC_ERR_PAGE_DESC_INVALID = 8, 3882 HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION = 9, 3883 /* 10 is reserved */ 3884 HMC_ERR_INVALID_OBJECT_TYPE = 11, 3885 /* 12 is reserved */ 3886 }; 3887 3888 /** 3889 * ice_log_hmc_error - Log an HMC error message 3890 * @hw: device hw structure 3891 * @dev: the device to pass to device_printf() 3892 * 3893 * Log a message when an HMC error interrupt is triggered. 
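 *
 * A minimal sketch of the expected caller (the admin interrupt handler
 * itself is outside this section, and the "oicr" local is illustrative):
 *
 *      u32 oicr = rd32(hw, PFINT_OICR);
 *      if (oicr & PFINT_OICR_HMC_ERR_M)
 *              ice_log_hmc_error(hw, dev);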
3894 */ 3895 void 3896 ice_log_hmc_error(struct ice_hw *hw, device_t dev) 3897 { 3898 u32 info, data; 3899 u8 index, errtype, objtype; 3900 bool isvf; 3901 3902 info = rd32(hw, PFHMC_ERRORINFO); 3903 data = rd32(hw, PFHMC_ERRORDATA); 3904 3905 index = (u8)(info & PFHMC_ERRORINFO_PMF_INDEX_M); 3906 errtype = (u8)((info & PFHMC_ERRORINFO_HMC_ERROR_TYPE_M) >> 3907 PFHMC_ERRORINFO_HMC_ERROR_TYPE_S); 3908 objtype = (u8)((info & PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M) >> 3909 PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S); 3910 3911 isvf = info & PFHMC_ERRORINFO_PMF_ISVF_M; 3912 3913 device_printf(dev, "%s HMC Error detected on PMF index %d:\n", 3914 isvf ? "VF" : "PF", index); 3915 3916 device_printf(dev, "error type %d, object type %d, data 0x%08x\n", 3917 errtype, objtype, data); 3918 3919 switch (errtype) { 3920 case HMC_ERR_PMF_INVALID: 3921 device_printf(dev, "Private Memory Function is not valid\n"); 3922 break; 3923 case HMC_ERR_VF_IDX_INVALID: 3924 device_printf(dev, "Invalid Private Memory Function index for PE enabled VF\n"); 3925 break; 3926 case HMC_ERR_VF_PARENT_PF_INVALID: 3927 device_printf(dev, "Invalid parent PF for PE enabled VF\n"); 3928 break; 3929 case HMC_ERR_INDEX_TOO_BIG: 3930 device_printf(dev, "Object index too big\n"); 3931 break; 3932 case HMC_ERR_ADDRESS_TOO_LARGE: 3933 device_printf(dev, "Address extends beyond segment descriptor limit\n"); 3934 break; 3935 case HMC_ERR_SEGMENT_DESC_INVALID: 3936 device_printf(dev, "Segment descriptor is invalid\n"); 3937 break; 3938 case HMC_ERR_SEGMENT_DESC_TOO_SMALL: 3939 device_printf(dev, "Segment descriptor is too small\n"); 3940 break; 3941 case HMC_ERR_PAGE_DESC_INVALID: 3942 device_printf(dev, "Page descriptor is invalid\n"); 3943 break; 3944 case HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION: 3945 device_printf(dev, "Unsupported Request completion received from PCIe\n"); 3946 break; 3947 case HMC_ERR_INVALID_OBJECT_TYPE: 3948 device_printf(dev, "Invalid object type\n"); 3949 break; 3950 default: 3951 device_printf(dev, "Unknown HMC error\n"); 3952 } 3953 3954 /* Clear the error indication */ 3955 wr32(hw, PFHMC_ERRORINFO, 0); 3956 } 3957 3958 /** 3959 * @struct ice_sysctl_info 3960 * @brief sysctl information 3961 * 3962 * Structure used to simplify the process of defining the many similar 3963 * statistics sysctls. 3964 */ 3965 struct ice_sysctl_info { 3966 u64 *stat; 3967 const char *name; 3968 const char *description; 3969 }; 3970 3971 /** 3972 * ice_add_sysctls_eth_stats - Add sysctls for ethernet statistics 3973 * @ctx: sysctl ctx to use 3974 * @parent: the parent node to add sysctls under 3975 * @stats: the ethernet stats structure to source values from 3976 * 3977 * Adds statistics sysctls for the ethernet statistics of the MAC or a VSI. 3978 * Will add them under the parent node specified. 3979 * 3980 * Note that rx_discards and tx_errors are only meaningful for VSIs and not 3981 * the global MAC/PF statistics, so they are not included here. 
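 *
 * Usage mirrors the calls made later in this file, for example when the
 * per-port MAC statistics node is populated:
 *
 *      ice_add_sysctls_eth_stats(ctx, mac_node, &stats->eth);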
3982 */ 3983 void 3984 ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, 3985 struct sysctl_oid *parent, 3986 struct ice_eth_stats *stats) 3987 { 3988 const struct ice_sysctl_info ctls[] = { 3989 /* Rx Stats */ 3990 { &stats->rx_bytes, "good_octets_rcvd", "Good Octets Received" }, 3991 { &stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received" }, 3992 { &stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received" }, 3993 { &stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received" }, 3994 /* Tx Stats */ 3995 { &stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted" }, 3996 { &stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted" }, 3997 { &stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted" }, 3998 { &stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted" }, 3999 /* End */ 4000 { 0, 0, 0 } 4001 }; 4002 4003 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4004 4005 const struct ice_sysctl_info *entry = ctls; 4006 while (entry->stat != 0) { 4007 SYSCTL_ADD_U64(ctx, parent_list, OID_AUTO, entry->name, 4008 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 4009 entry->description); 4010 entry++; 4011 } 4012 } 4013 4014 /** 4015 * ice_sysctl_tx_cso_stat - Display Tx checksum offload statistic 4016 * @oidp: sysctl oid structure 4017 * @arg1: pointer to private data structure 4018 * @arg2: Tx CSO stat to read 4019 * @req: sysctl request pointer 4020 * 4021 * On read: Sums the per-queue Tx CSO stat and displays it. 4022 */ 4023 static int 4024 ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS) 4025 { 4026 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4027 enum ice_tx_cso_stat type = (enum ice_tx_cso_stat)arg2; 4028 u64 stat = 0; 4029 int i; 4030 4031 if (ice_driver_is_detaching(vsi->sc)) 4032 return (ESHUTDOWN); 4033 4034 /* Check that the type is valid */ 4035 if (type >= ICE_CSO_STAT_TX_COUNT) 4036 return (EDOOFUS); 4037 4038 /* Sum the stat for each of the Tx queues */ 4039 for (i = 0; i < vsi->num_tx_queues; i++) 4040 stat += vsi->tx_queues[i].stats.cso[type]; 4041 4042 return sysctl_handle_64(oidp, NULL, stat, req); 4043 } 4044 4045 /** 4046 * ice_sysctl_rx_cso_stat - Display Rx checksum offload statistic 4047 * @oidp: sysctl oid structure 4048 * @arg1: pointer to private data structure 4049 * @arg2: Rx CSO stat to read 4050 * @req: sysctl request pointer 4051 * 4052 * On read: Sums the per-queue Rx CSO stat and displays it. 4053 */ 4054 static int 4055 ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS) 4056 { 4057 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4058 enum ice_rx_cso_stat type = (enum ice_rx_cso_stat)arg2; 4059 u64 stat = 0; 4060 int i; 4061 4062 if (ice_driver_is_detaching(vsi->sc)) 4063 return (ESHUTDOWN); 4064 4065 /* Check that the type is valid */ 4066 if (type >= ICE_CSO_STAT_RX_COUNT) 4067 return (EDOOFUS); 4068 4069 /* Sum the stat for each of the Rx queues */ 4070 for (i = 0; i < vsi->num_rx_queues; i++) 4071 stat += vsi->rx_queues[i].stats.cso[type]; 4072 4073 return sysctl_handle_64(oidp, NULL, stat, req); 4074 } 4075 4076 /** 4077 * @struct ice_rx_cso_stat_info 4078 * @brief sysctl information for an Rx checksum offload statistic 4079 * 4080 * Structure used to simplify the process of defining the checksum offload 4081 * statistics. 
4081 * statistics.
4082 */ 4083 struct ice_rx_cso_stat_info { 4084 enum ice_rx_cso_stat type; 4085 const char *name; 4086 const char *description; 4087 }; 4088 4089 /** 4090 * @struct ice_tx_cso_stat_info 4091 * @brief sysctl information for a Tx checksum offload statistic 4092 * 4093 * Structure used to simplify the process of defining the checksum offload 4094 * statistics. 4095 */ 4096 struct ice_tx_cso_stat_info { 4097 enum ice_tx_cso_stat type; 4098 const char *name; 4099 const char *description; 4100 }; 4101 4102 /** 4103 * ice_add_sysctls_sw_stats - Add sysctls for software statistics 4104 * @vsi: pointer to the VSI to add sysctls for 4105 * @ctx: sysctl ctx to use 4106 * @parent: the parent node to add sysctls under 4107 * 4108 * Add statistics sysctls for software tracked statistics of a VSI. 4109 * 4110 * Currently this only adds checksum offload statistics, but more counters may 4111 * be added in the future. 4112 */ 4113 static void 4114 ice_add_sysctls_sw_stats(struct ice_vsi *vsi, 4115 struct sysctl_ctx_list *ctx, 4116 struct sysctl_oid *parent) 4117 { 4118 struct sysctl_oid *cso_node; 4119 struct sysctl_oid_list *cso_list; 4120 4121 /* Tx CSO Stats */ 4122 const struct ice_tx_cso_stat_info tx_ctls[] = { 4123 { ICE_CSO_STAT_TX_TCP, "tx_tcp", "Transmit TCP Packets marked for HW checksum" }, 4124 { ICE_CSO_STAT_TX_UDP, "tx_udp", "Transmit UDP Packets marked for HW checksum" }, 4125 { ICE_CSO_STAT_TX_SCTP, "tx_sctp", "Transmit SCTP Packets marked for HW checksum" }, 4126 { ICE_CSO_STAT_TX_IP4, "tx_ip4", "Transmit IPv4 Packets marked for HW checksum" }, 4127 { ICE_CSO_STAT_TX_IP6, "tx_ip6", "Transmit IPv6 Packets marked for HW checksum" }, 4128 { ICE_CSO_STAT_TX_L3_ERR, "tx_l3_err", "Transmit packets that driver failed to set L3 HW CSO bits for" }, 4129 { ICE_CSO_STAT_TX_L4_ERR, "tx_l4_err", "Transmit packets that driver failed to set L4 HW CSO bits for" }, 4130 /* End */ 4131 { ICE_CSO_STAT_TX_COUNT, 0, 0 } 4132 }; 4133 4134 /* Rx CSO Stats */ 4135 const struct ice_rx_cso_stat_info rx_ctls[] = { 4136 { ICE_CSO_STAT_RX_IP4_ERR, "rx_ip4_err", "Received packets with invalid IPv4 checksum indicated by HW" }, 4137 { ICE_CSO_STAT_RX_IP6_ERR, "rx_ip6_err", "Received IPv6 packets with extension headers" }, 4138 { ICE_CSO_STAT_RX_L3_ERR, "rx_l3_err", "Received packets with an unexpected invalid L3 checksum indicated by HW" }, 4139 { ICE_CSO_STAT_RX_TCP_ERR, "rx_tcp_err", "Received packets with invalid TCP checksum indicated by HW" }, 4140 { ICE_CSO_STAT_RX_UDP_ERR, "rx_udp_err", "Received packets with invalid UDP checksum indicated by HW" }, 4141 { ICE_CSO_STAT_RX_SCTP_ERR, "rx_sctp_err", "Received packets with invalid SCTP checksum indicated by HW" }, 4142 { ICE_CSO_STAT_RX_L4_ERR, "rx_l4_err", "Received packets with an unexpected invalid L4 checksum indicated by HW" }, 4143 /* End */ 4144 { ICE_CSO_STAT_RX_COUNT, 0, 0 } 4145 }; 4146 4147 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4148 4149 /* Add a node for statistics tracked by software. 
*/ 4150 cso_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "cso", CTLFLAG_RD, 4151 NULL, "Checksum offload Statistics"); 4152 cso_list = SYSCTL_CHILDREN(cso_node); 4153 4154 const struct ice_tx_cso_stat_info *tx_entry = tx_ctls; 4155 while (tx_entry->name && tx_entry->description) { 4156 SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, tx_entry->name, 4157 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, 4158 vsi, tx_entry->type, ice_sysctl_tx_cso_stat, "QU", 4159 tx_entry->description); 4160 tx_entry++; 4161 } 4162 4163 const struct ice_rx_cso_stat_info *rx_entry = rx_ctls; 4164 while (rx_entry->name && rx_entry->description) { 4165 SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, rx_entry->name, 4166 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, 4167 vsi, rx_entry->type, ice_sysctl_rx_cso_stat, "QU", 4168 rx_entry->description); 4169 rx_entry++; 4170 } 4171 } 4172 4173 /** 4174 * ice_add_vsi_sysctls - Add sysctls for a VSI 4175 * @vsi: pointer to VSI structure 4176 * 4177 * Add various sysctls for a given VSI. 4178 */ 4179 void 4180 ice_add_vsi_sysctls(struct ice_vsi *vsi) 4181 { 4182 struct sysctl_ctx_list *ctx = &vsi->ctx; 4183 struct sysctl_oid *hw_node, *sw_node; 4184 struct sysctl_oid_list *vsi_list, *hw_list, *sw_list; 4185 4186 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 4187 4188 /* Keep hw stats in their own node. */ 4189 hw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "hw", CTLFLAG_RD, 4190 NULL, "VSI Hardware Statistics"); 4191 hw_list = SYSCTL_CHILDREN(hw_node); 4192 4193 /* Add the ethernet statistics for this VSI */ 4194 ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur); 4195 4196 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_discards", 4197 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards, 4198 0, "Discarded Rx Packets"); 4199 4200 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_errors", 4201 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_errors, 4202 0, "Rx Packets Discarded Due To Error"); 4203 4204 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_no_desc", 4205 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_no_desc, 4206 0, "Rx Packets Discarded Due To Lack Of Descriptors"); 4207 4208 SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "tx_errors", 4209 CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.tx_errors, 4210 0, "Tx Packets Discarded Due To Error"); 4211 4212 /* Add a node for statistics tracked by software. */ 4213 sw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "sw", CTLFLAG_RD, 4214 NULL, "VSI Software Statistics"); 4215 sw_list = SYSCTL_CHILDREN(sw_node); 4216 4217 ice_add_sysctls_sw_stats(vsi, ctx, sw_node); 4218 } 4219 4220 /** 4221 * ice_add_sysctls_mac_stats - Add sysctls for global MAC statistics 4222 * @ctx: the sysctl ctx to use 4223 * @parent: parent node to add the sysctls under 4224 * @stats: the hw ports stat structure to pull values from 4225 * 4226 * Add global MAC statistics sysctls. 
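 *
 * The counters are placed under a "mac" child of the given parent node;
 * with the "hw" node created in ice_add_device_sysctls they appear as,
 * e.g. (unit number assumed):
 *
 *      # sysctl dev.ice.0.hw.mac.rx_frames_64
 *      # sysctl dev.ice.0.hw.mac.crc_errors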
4227 */ 4228 void 4229 ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, 4230 struct sysctl_oid *parent, 4231 struct ice_hw_port_stats *stats) 4232 { 4233 struct sysctl_oid *mac_node; 4234 struct sysctl_oid_list *parent_list, *mac_list; 4235 4236 parent_list = SYSCTL_CHILDREN(parent); 4237 4238 mac_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "mac", CTLFLAG_RD, 4239 NULL, "Mac Hardware Statistics"); 4240 mac_list = SYSCTL_CHILDREN(mac_node); 4241 4242 /* add the common ethernet statistics */ 4243 ice_add_sysctls_eth_stats(ctx, mac_node, &stats->eth); 4244 4245 const struct ice_sysctl_info ctls[] = { 4246 /* Packet Reception Stats */ 4247 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, 4248 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, 4249 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, 4250 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, 4251 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, 4252 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, 4253 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, 4254 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, 4255 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, 4256 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, 4257 {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, 4258 {&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"}, 4259 /* Packet Transmission Stats */ 4260 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, 4261 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, 4262 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, 4263 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, 4264 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, 4265 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, 4266 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, 4267 {&stats->tx_dropped_link_down, "tx_dropped", "Tx Dropped Due To Link Down"}, 4268 /* Flow control */ 4269 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, 4270 {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, 4271 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, 4272 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, 4273 /* Other */ 4274 {&stats->crc_errors, "crc_errors", "CRC Errors"}, 4275 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, 4276 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, 4277 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, 4278 /* End */ 4279 { 0, 0, 0 } 4280 }; 4281 4282 const struct ice_sysctl_info *entry = ctls; 4283 while (entry->stat != 0) { 4284 SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, entry->name, 4285 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 4286 entry->description); 4287 entry++; 4288 } 4289 } 4290 4291 /** 4292 * ice_configure_misc_interrupts - enable 'other' interrupt causes 4293 * @sc: pointer to device private softc 4294 * 4295 * Enable various "other" interrupt causes, and associate them to interrupt 0, 4296 * which is our administrative interrupt. 
4297 */ 4298 void 4299 ice_configure_misc_interrupts(struct ice_softc *sc) 4300 { 4301 struct ice_hw *hw = &sc->hw; 4302 u32 val; 4303 4304 /* Read the OICR register to clear it */ 4305 rd32(hw, PFINT_OICR); 4306 4307 /* Enable useful "other" interrupt causes */ 4308 val = (PFINT_OICR_ECC_ERR_M | 4309 PFINT_OICR_MAL_DETECT_M | 4310 PFINT_OICR_GRST_M | 4311 PFINT_OICR_PCI_EXCEPTION_M | 4312 PFINT_OICR_VFLR_M | 4313 PFINT_OICR_HMC_ERR_M | 4314 PFINT_OICR_PE_CRITERR_M); 4315 4316 wr32(hw, PFINT_OICR_ENA, val); 4317 4318 /* Note that since we're using MSI-X index 0, and ITR index 0, we do 4319 * not explicitly program them when writing to the PFINT_*_CTL 4320 * registers. Nevertheless, these writes are associating the 4321 * interrupts with the ITR 0 vector 4322 */ 4323 4324 /* Associate the OICR interrupt with ITR 0, and enable it */ 4325 wr32(hw, PFINT_OICR_CTL, PFINT_OICR_CTL_CAUSE_ENA_M); 4326 4327 /* Associate the Mailbox interrupt with ITR 0, and enable it */ 4328 wr32(hw, PFINT_MBX_CTL, PFINT_MBX_CTL_CAUSE_ENA_M); 4329 4330 /* Associate the AdminQ interrupt with ITR 0, and enable it */ 4331 wr32(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M); 4332 } 4333 4334 /** 4335 * ice_filter_is_mcast - Check if info is a multicast filter 4336 * @vsi: vsi structure addresses are targeted towards 4337 * @info: filter info 4338 * 4339 * @returns true if the provided info is a multicast filter, and false 4340 * otherwise. 4341 */ 4342 static bool 4343 ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info) 4344 { 4345 const u8 *addr = info->l_data.mac.mac_addr; 4346 4347 /* 4348 * Check if this info matches a multicast filter added by 4349 * ice_add_mac_to_list 4350 */ 4351 if ((info->flag == ICE_FLTR_TX) && 4352 (info->src_id == ICE_SRC_ID_VSI) && 4353 (info->lkup_type == ICE_SW_LKUP_MAC) && 4354 (info->vsi_handle == vsi->idx) && 4355 ETHER_IS_MULTICAST(addr) && !ETHER_IS_BROADCAST(addr)) 4356 return true; 4357 4358 return false; 4359 } 4360 4361 /** 4362 * @struct ice_mcast_sync_data 4363 * @brief data used by ice_sync_one_mcast_filter function 4364 * 4365 * Structure used to store data needed for processing by the 4366 * ice_sync_one_mcast_filter. This structure contains a linked list of filters 4367 * to be added, an error indication, and a pointer to the device softc. 4368 */ 4369 struct ice_mcast_sync_data { 4370 struct ice_list_head add_list; 4371 struct ice_softc *sc; 4372 int err; 4373 }; 4374 4375 /** 4376 * ice_sync_one_mcast_filter - Check if we need to program the filter 4377 * @p: void pointer to algorithm data 4378 * @sdl: link level socket address 4379 * @count: unused count value 4380 * 4381 * Called by if_foreach_llmaddr to operate on each filter in the ifp filter 4382 * list. For the given address, search our internal list to see if we have 4383 * found the filter. If not, add it to our list of filters that need to be 4384 * programmed. 
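 *
 * This function is passed as the if_foreach_llmaddr() callback during the
 * synchronization step below, i.e.:
 *
 *      if_foreach_llmaddr(sc->ifp, ice_sync_one_mcast_filter, (void *)&data);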
4385 * 4386 * @returns (1) if we've actually setup the filter to be added 4387 */ 4388 static u_int 4389 ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, 4390 u_int __unused count) 4391 { 4392 struct ice_mcast_sync_data *data = (struct ice_mcast_sync_data *)p; 4393 struct ice_softc *sc = data->sc; 4394 struct ice_hw *hw = &sc->hw; 4395 struct ice_switch_info *sw = hw->switch_info; 4396 const u8 *sdl_addr = (const u8 *)LLADDR(sdl); 4397 struct ice_fltr_mgmt_list_entry *itr; 4398 struct ice_list_head *rules; 4399 int err; 4400 4401 rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4402 4403 /* 4404 * If a previous filter already indicated an error, there is no need 4405 * for us to finish processing the rest of the filters. 4406 */ 4407 if (data->err) 4408 return (0); 4409 4410 /* See if this filter has already been programmed */ 4411 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { 4412 struct ice_fltr_info *info = &itr->fltr_info; 4413 const u8 *addr = info->l_data.mac.mac_addr; 4414 4415 /* Only check multicast filters */ 4416 if (!ice_filter_is_mcast(&sc->pf_vsi, info)) 4417 continue; 4418 4419 /* 4420 * If this filter matches, mark the internal filter as 4421 * "found", and exit. 4422 */ 4423 if (bcmp(addr, sdl_addr, ETHER_ADDR_LEN) == 0) { 4424 itr->marker = ICE_FLTR_FOUND; 4425 return (1); 4426 } 4427 } 4428 4429 /* 4430 * If we failed to locate the filter in our internal list, we need to 4431 * place it into our add list. 4432 */ 4433 err = ice_add_mac_to_list(&sc->pf_vsi, &data->add_list, sdl_addr, 4434 ICE_FWD_TO_VSI); 4435 if (err) { 4436 device_printf(sc->dev, 4437 "Failed to place MAC %6D onto add list, err %s\n", 4438 sdl_addr, ":", ice_err_str(err)); 4439 data->err = err; 4440 4441 return (0); 4442 } 4443 4444 return (1); 4445 } 4446 4447 /** 4448 * ice_sync_multicast_filters - Synchronize OS and internal filter list 4449 * @sc: device private structure 4450 * 4451 * Called in response to SIOCDELMULTI to synchronize the operating system 4452 * multicast address list with the internal list of filters programmed to 4453 * firmware. 4454 * 4455 * Works in one phase to find added and deleted filters using a marker bit on 4456 * the internal list. 4457 * 4458 * First, a loop over the internal list clears the marker bit. Second, for 4459 * each filter in the ifp list is checked. If we find it in the internal list, 4460 * the marker bit is set. Otherwise, the filter is added to the add list. 4461 * Third, a loop over the internal list determines if any filters have not 4462 * been found. Each of these is added to the delete list. Finally, the add and 4463 * delete lists are programmed to firmware to update the filters. 4464 * 4465 * @returns zero on success or an integer error code on failure. 
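 *
 * The caller that services SIOCDELMULTI (outside this section) only needs
 * the softc, e.g.:
 *
 *      err = ice_sync_multicast_filters(sc);
 *      if (err)
 *              device_printf(sc->dev, "multicast sync failed, err %s\n",
 *                  ice_err_str(err));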
4466 */ 4467 int 4468 ice_sync_multicast_filters(struct ice_softc *sc) 4469 { 4470 struct ice_hw *hw = &sc->hw; 4471 struct ice_switch_info *sw = hw->switch_info; 4472 struct ice_fltr_mgmt_list_entry *itr; 4473 struct ice_mcast_sync_data data = {}; 4474 struct ice_list_head *rules, remove_list; 4475 enum ice_status status; 4476 int err = 0; 4477 4478 INIT_LIST_HEAD(&data.add_list); 4479 INIT_LIST_HEAD(&remove_list); 4480 data.sc = sc; 4481 data.err = 0; 4482 4483 rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4484 4485 /* Acquire the lock for the entire duration */ 4486 ice_acquire_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4487 4488 /* (1) Reset the marker state for all filters */ 4489 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) 4490 itr->marker = ICE_FLTR_NOT_FOUND; 4491 4492 /* (2) determine which filters need to be added and removed */ 4493 if_foreach_llmaddr(sc->ifp, ice_sync_one_mcast_filter, (void *)&data); 4494 if (data.err) { 4495 /* ice_sync_one_mcast_filter already prints an error */ 4496 err = data.err; 4497 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4498 goto free_filter_lists; 4499 } 4500 4501 LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { 4502 struct ice_fltr_info *info = &itr->fltr_info; 4503 const u8 *addr = info->l_data.mac.mac_addr; 4504 4505 /* Only check multicast filters */ 4506 if (!ice_filter_is_mcast(&sc->pf_vsi, info)) 4507 continue; 4508 4509 /* 4510 * If the filter is not marked as found, then it must no 4511 * longer be in the ifp address list, so we need to remove it. 4512 */ 4513 if (itr->marker == ICE_FLTR_NOT_FOUND) { 4514 err = ice_add_mac_to_list(&sc->pf_vsi, &remove_list, 4515 addr, ICE_FWD_TO_VSI); 4516 if (err) { 4517 device_printf(sc->dev, 4518 "Failed to place MAC %6D onto remove list, err %s\n", 4519 addr, ":", ice_err_str(err)); 4520 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4521 goto free_filter_lists; 4522 } 4523 } 4524 } 4525 4526 ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); 4527 4528 status = ice_add_mac(hw, &data.add_list); 4529 if (status) { 4530 device_printf(sc->dev, 4531 "Could not add new MAC filters, err %s aq_err %s\n", 4532 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 4533 err = (EIO); 4534 goto free_filter_lists; 4535 } 4536 4537 status = ice_remove_mac(hw, &remove_list); 4538 if (status) { 4539 device_printf(sc->dev, 4540 "Could not remove old MAC filters, err %s aq_err %s\n", 4541 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 4542 err = (EIO); 4543 goto free_filter_lists; 4544 } 4545 4546 free_filter_lists: 4547 ice_free_fltr_list(&data.add_list); 4548 ice_free_fltr_list(&remove_list); 4549 4550 return (err); 4551 } 4552 4553 /** 4554 * ice_add_vlan_hw_filter - Add a VLAN filter for a given VSI 4555 * @vsi: The VSI to add the filter for 4556 * @vid: VLAN to add 4557 * 4558 * Programs a HW filter so that the given VSI will receive the specified VLAN. 
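 *
 * A sketch of the expected use from a VLAN registration path (the vtag
 * variable and error handling are illustrative):
 *
 *      status = ice_add_vlan_hw_filter(vsi, vtag);
 *      if (status)
 *              device_printf(vsi->sc->dev,
 *                  "Failed to add VLAN %d filter, err %s\n",
 *                  vtag, ice_status_str(status));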
4559 */ 4560 enum ice_status 4561 ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) 4562 { 4563 struct ice_hw *hw = &vsi->sc->hw; 4564 struct ice_list_head vlan_list; 4565 struct ice_fltr_list_entry vlan_entry; 4566 4567 INIT_LIST_HEAD(&vlan_list); 4568 memset(&vlan_entry, 0, sizeof(vlan_entry)); 4569 4570 vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 4571 vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 4572 vlan_entry.fltr_info.flag = ICE_FLTR_TX; 4573 vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; 4574 vlan_entry.fltr_info.vsi_handle = vsi->idx; 4575 vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; 4576 4577 LIST_ADD(&vlan_entry.list_entry, &vlan_list); 4578 4579 return ice_add_vlan(hw, &vlan_list); 4580 } 4581 4582 /** 4583 * ice_remove_vlan_hw_filter - Remove a VLAN filter for a given VSI 4584 * @vsi: The VSI to add the filter for 4585 * @vid: VLAN to remove 4586 * 4587 * Removes a previously programmed HW filter for the specified VSI. 4588 */ 4589 enum ice_status 4590 ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) 4591 { 4592 struct ice_hw *hw = &vsi->sc->hw; 4593 struct ice_list_head vlan_list; 4594 struct ice_fltr_list_entry vlan_entry; 4595 4596 INIT_LIST_HEAD(&vlan_list); 4597 memset(&vlan_entry, 0, sizeof(vlan_entry)); 4598 4599 vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; 4600 vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; 4601 vlan_entry.fltr_info.flag = ICE_FLTR_TX; 4602 vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; 4603 vlan_entry.fltr_info.vsi_handle = vsi->idx; 4604 vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; 4605 4606 LIST_ADD(&vlan_entry.list_entry, &vlan_list); 4607 4608 return ice_remove_vlan(hw, &vlan_list); 4609 } 4610 4611 #define ICE_SYSCTL_HELP_RX_ITR \ 4612 "\nControl Rx interrupt throttle rate." \ 4613 "\n\t0-8160 - sets interrupt rate in usecs" \ 4614 "\n\t -1 - reset the Rx itr to default" 4615 4616 /** 4617 * ice_sysctl_rx_itr - Display or change the Rx ITR for a VSI 4618 * @oidp: sysctl oid structure 4619 * @arg1: pointer to private data structure 4620 * @arg2: unused 4621 * @req: sysctl request pointer 4622 * 4623 * On read: Displays the current Rx ITR value 4624 * on write: Sets the Rx ITR value, reconfiguring device if it is up 4625 */ 4626 static int 4627 ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS) 4628 { 4629 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4630 struct ice_softc *sc = vsi->sc; 4631 int increment, error = 0; 4632 4633 UNREFERENCED_PARAMETER(arg2); 4634 4635 if (ice_driver_is_detaching(sc)) 4636 return (ESHUTDOWN); 4637 4638 error = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req); 4639 if ((error) || (req->newptr == NULL)) 4640 return (error); 4641 4642 if (vsi->rx_itr < 0) 4643 vsi->rx_itr = ICE_DFLT_RX_ITR; 4644 if (vsi->rx_itr > ICE_ITR_MAX) 4645 vsi->rx_itr = ICE_ITR_MAX; 4646 4647 /* Assume 2usec increment if it hasn't been loaded yet */ 4648 increment = sc->hw.itr_gran ? : 2; 4649 4650 /* We need to round the value to the hardware's ITR granularity */ 4651 vsi->rx_itr = (vsi->rx_itr / increment ) * increment; 4652 4653 /* If the driver has finished initializing, then we need to reprogram 4654 * the ITR registers now. Otherwise, they will be programmed during 4655 * driver initialization. 4656 */ 4657 if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) 4658 ice_configure_rx_itr(vsi); 4659 4660 return (0); 4661 } 4662 4663 #define ICE_SYSCTL_HELP_TX_ITR \ 4664 "\nControl Tx interrupt throttle rate." 
\ 4665 "\n\t0-8160 - sets interrupt rate in usecs" \ 4666 "\n\t -1 - reset the Tx itr to default" 4667 4668 /** 4669 * ice_sysctl_tx_itr - Display or change the Tx ITR for a VSI 4670 * @oidp: sysctl oid structure 4671 * @arg1: pointer to private data structure 4672 * @arg2: unused 4673 * @req: sysctl request pointer 4674 * 4675 * On read: Displays the current Tx ITR value 4676 * on write: Sets the Tx ITR value, reconfiguring device if it is up 4677 */ 4678 static int 4679 ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS) 4680 { 4681 struct ice_vsi *vsi = (struct ice_vsi *)arg1; 4682 struct ice_softc *sc = vsi->sc; 4683 int increment, error = 0; 4684 4685 UNREFERENCED_PARAMETER(arg2); 4686 4687 if (ice_driver_is_detaching(sc)) 4688 return (ESHUTDOWN); 4689 4690 error = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req); 4691 if ((error) || (req->newptr == NULL)) 4692 return (error); 4693 4694 /* Allow configuring a negative value to reset to the default */ 4695 if (vsi->tx_itr < 0) 4696 vsi->tx_itr = ICE_DFLT_TX_ITR; 4697 if (vsi->tx_itr > ICE_ITR_MAX) 4698 vsi->tx_itr = ICE_ITR_MAX; 4699 4700 /* Assume 2usec increment if it hasn't been loaded yet */ 4701 increment = sc->hw.itr_gran ? : 2; 4702 4703 /* We need to round the value to the hardware's ITR granularity */ 4704 vsi->tx_itr = (vsi->tx_itr / increment ) * increment; 4705 4706 /* If the driver has finished initializing, then we need to reprogram 4707 * the ITR registers now. Otherwise, they will be programmed during 4708 * driver initialization. 4709 */ 4710 if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) 4711 ice_configure_tx_itr(vsi); 4712 4713 return (0); 4714 } 4715 4716 /** 4717 * ice_add_vsi_tunables - Add tunables and nodes for a VSI 4718 * @vsi: pointer to VSI structure 4719 * @parent: parent node to add the tunables under 4720 * 4721 * Create a sysctl context for the VSI, so that sysctls for the VSI can be 4722 * dynamically removed upon VSI removal. 4723 * 4724 * Add various tunables and set up the basic node structure for the VSI. Must 4725 * be called *prior* to ice_add_vsi_sysctls. It should be called as soon as 4726 * possible after the VSI memory is initialized. 4727 * 4728 * VSI specific sysctls with CTLFLAG_TUN should be initialized here so that 4729 * their values can be read from loader.conf prior to their first use in the 4730 * driver. 
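 *
 * For example (unit number and VSI index are illustrative), the ITR
 * tunables created below can be seeded from loader.conf with:
 *
 *      dev.ice.0.vsi.0.rx_itr="125"
 *      dev.ice.0.vsi.0.tx_itr="125"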
4731 */ 4732 void 4733 ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent) 4734 { 4735 struct sysctl_oid_list *vsi_list; 4736 char vsi_name[32], vsi_desc[32]; 4737 4738 struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); 4739 4740 /* Initialize the sysctl context for this VSI */ 4741 sysctl_ctx_init(&vsi->ctx); 4742 4743 /* Add a node to collect this VSI's statistics together */ 4744 snprintf(vsi_name, sizeof(vsi_name), "%u", vsi->idx); 4745 snprintf(vsi_desc, sizeof(vsi_desc), "VSI %u", vsi->idx); 4746 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->ctx, parent_list, OID_AUTO, vsi_name, 4747 CTLFLAG_RD, NULL, vsi_desc); 4748 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 4749 4750 vsi->rx_itr = ICE_DFLT_TX_ITR; 4751 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "rx_itr", 4752 CTLTYPE_S16 | CTLFLAG_RWTUN, 4753 vsi, 0, ice_sysctl_rx_itr, "S", 4754 ICE_SYSCTL_HELP_RX_ITR); 4755 4756 vsi->tx_itr = ICE_DFLT_TX_ITR; 4757 SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "tx_itr", 4758 CTLTYPE_S16 | CTLFLAG_RWTUN, 4759 vsi, 0, ice_sysctl_tx_itr, "S", 4760 ICE_SYSCTL_HELP_TX_ITR); 4761 } 4762 4763 /** 4764 * ice_del_vsi_sysctl_ctx - Delete the sysctl context(s) of a VSI 4765 * @vsi: the VSI to remove contexts for 4766 * 4767 * Free the context for the VSI sysctls. This includes the main context, as 4768 * well as the per-queue sysctls. 4769 */ 4770 void 4771 ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi) 4772 { 4773 device_t dev = vsi->sc->dev; 4774 int err; 4775 4776 if (vsi->vsi_node) { 4777 err = sysctl_ctx_free(&vsi->ctx); 4778 if (err) 4779 device_printf(dev, "failed to free VSI %d sysctl context, err %s\n", 4780 vsi->idx, ice_err_str(err)); 4781 vsi->vsi_node = NULL; 4782 } 4783 } 4784 4785 /** 4786 * ice_add_device_tunables - Add early tunable sysctls and sysctl nodes 4787 * @sc: device private structure 4788 * 4789 * Add per-device dynamic tunable sysctls, and setup the general sysctl trees 4790 * for re-use by ice_add_device_sysctls. 4791 * 4792 * In order for the sysctl fields to be initialized before use, this function 4793 * should be called as early as possible during attach activities. 4794 * 4795 * Any non-global sysctl marked as CTLFLAG_TUN should likely be initialized 4796 * here in this function, rather than later in ice_add_device_sysctls. 4797 * 4798 * To make things easier, this function is also expected to setup the various 4799 * sysctl nodes in addition to tunables so that other sysctls which can't be 4800 * initialized early can hook into the same nodes. 4801 */ 4802 void 4803 ice_add_device_tunables(struct ice_softc *sc) 4804 { 4805 device_t dev = sc->dev; 4806 4807 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 4808 struct sysctl_oid_list *ctx_list = 4809 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 4810 4811 /* Add a node to track VSI sysctls. Keep track of the node in the 4812 * softc so that we can hook other sysctls into it later. This 4813 * includes both the VSI statistics, as well as potentially dynamic 4814 * VSIs in the future. 
4814 * VSIs in the future.
4815 */ 4816 4817 sc->vsi_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "vsi", 4818 CTLFLAG_RD, NULL, "VSI Configuration and Statistics"); 4819 4820 /* Add debug tunables */ 4821 ice_add_debug_tunables(sc); 4822 } 4823 4824 /** 4825 * ice_sysctl_dump_mac_filters - Dump a list of all HW MAC Filters 4826 * @oidp: sysctl oid structure 4827 * @arg1: pointer to private data structure 4828 * @arg2: unused 4829 * @req: sysctl request pointer 4830 * 4831 * Callback for "mac_filters" sysctl to dump the programmed MAC filters. 4832 */ 4833 static int 4834 ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS) 4835 { 4836 struct ice_softc *sc = (struct ice_softc *)arg1; 4837 struct ice_hw *hw = &sc->hw; 4838 struct ice_switch_info *sw = hw->switch_info; 4839 struct ice_fltr_mgmt_list_entry *fm_entry; 4840 struct ice_list_head *rule_head; 4841 struct ice_lock *rule_lock; 4842 struct ice_fltr_info *fi; 4843 struct sbuf *sbuf; 4844 int ret; 4845 4846 UNREFERENCED_PARAMETER(oidp); 4847 UNREFERENCED_PARAMETER(arg2); 4848 4849 if (ice_driver_is_detaching(sc)) 4850 return (ESHUTDOWN); 4851 4852 /* Wire the old buffer so we can take a non-sleepable lock */ 4853 ret = sysctl_wire_old_buffer(req, 0); 4854 if (ret) 4855 return (ret); 4856 4857 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4858 4859 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 4860 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 4861 4862 sbuf_printf(sbuf, "MAC Filter List"); 4863 4864 ice_acquire_lock(rule_lock); 4865 4866 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 4867 fi = &fm_entry->fltr_info; 4868 4869 sbuf_printf(sbuf, 4870 "\nmac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %d", 4871 fi->l_data.mac.mac_addr, ":", fi->vsi_handle, 4872 ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, 4873 ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); 4874 4875 /* if we have a vsi_list_info, print some information about that */ 4876 if (fm_entry->vsi_list_info) { 4877 sbuf_printf(sbuf, 4878 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 4879 fm_entry->vsi_count, 4880 fm_entry->vsi_list_info->vsi_list_id, 4881 fm_entry->vsi_list_info->ref_cnt); 4882 } 4883 } 4884 4885 ice_release_lock(rule_lock); 4886 4887 sbuf_finish(sbuf); 4888 sbuf_delete(sbuf); 4889 4890 return (0); 4891 } 4892 4893 /** 4894 * ice_sysctl_dump_vlan_filters - Dump a list of all HW VLAN Filters 4895 * @oidp: sysctl oid structure 4896 * @arg1: pointer to private data structure 4897 * @arg2: unused 4898 * @req: sysctl request pointer 4899 * 4900 * Callback for "vlan_filters" sysctl to dump the programmed VLAN filters. 
4901 */ 4902 static int 4903 ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS) 4904 { 4905 struct ice_softc *sc = (struct ice_softc *)arg1; 4906 struct ice_hw *hw = &sc->hw; 4907 struct ice_switch_info *sw = hw->switch_info; 4908 struct ice_fltr_mgmt_list_entry *fm_entry; 4909 struct ice_list_head *rule_head; 4910 struct ice_lock *rule_lock; 4911 struct ice_fltr_info *fi; 4912 struct sbuf *sbuf; 4913 int ret; 4914 4915 UNREFERENCED_PARAMETER(oidp); 4916 UNREFERENCED_PARAMETER(arg2); 4917 4918 if (ice_driver_is_detaching(sc)) 4919 return (ESHUTDOWN); 4920 4921 /* Wire the old buffer so we can take a non-sleepable lock */ 4922 ret = sysctl_wire_old_buffer(req, 0); 4923 if (ret) 4924 return (ret); 4925 4926 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4927 4928 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; 4929 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; 4930 4931 sbuf_printf(sbuf, "VLAN Filter List"); 4932 4933 ice_acquire_lock(rule_lock); 4934 4935 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 4936 fi = &fm_entry->fltr_info; 4937 4938 sbuf_printf(sbuf, 4939 "\nvlan_id = %4d, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 4940 fi->l_data.vlan.vlan_id, fi->vsi_handle, 4941 ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, 4942 ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); 4943 4944 /* if we have a vsi_list_info, print some information about that */ 4945 if (fm_entry->vsi_list_info) { 4946 sbuf_printf(sbuf, 4947 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 4948 fm_entry->vsi_count, 4949 fm_entry->vsi_list_info->vsi_list_id, 4950 fm_entry->vsi_list_info->ref_cnt); 4951 } 4952 } 4953 4954 ice_release_lock(rule_lock); 4955 4956 sbuf_finish(sbuf); 4957 sbuf_delete(sbuf); 4958 4959 return (0); 4960 } 4961 4962 /** 4963 * ice_sysctl_dump_ethertype_filters - Dump a list of all HW Ethertype filters 4964 * @oidp: sysctl oid structure 4965 * @arg1: pointer to private data structure 4966 * @arg2: unused 4967 * @req: sysctl request pointer 4968 * 4969 * Callback for "ethertype_filters" sysctl to dump the programmed Ethertype 4970 * filters. 
4971 */ 4972 static int 4973 ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS) 4974 { 4975 struct ice_softc *sc = (struct ice_softc *)arg1; 4976 struct ice_hw *hw = &sc->hw; 4977 struct ice_switch_info *sw = hw->switch_info; 4978 struct ice_fltr_mgmt_list_entry *fm_entry; 4979 struct ice_list_head *rule_head; 4980 struct ice_lock *rule_lock; 4981 struct ice_fltr_info *fi; 4982 struct sbuf *sbuf; 4983 int ret; 4984 4985 UNREFERENCED_PARAMETER(oidp); 4986 UNREFERENCED_PARAMETER(arg2); 4987 4988 if (ice_driver_is_detaching(sc)) 4989 return (ESHUTDOWN); 4990 4991 /* Wire the old buffer so we can take a non-sleepable lock */ 4992 ret = sysctl_wire_old_buffer(req, 0); 4993 if (ret) 4994 return (ret); 4995 4996 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 4997 4998 rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rule_lock; 4999 rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rules; 5000 5001 sbuf_printf(sbuf, "Ethertype Filter List"); 5002 5003 ice_acquire_lock(rule_lock); 5004 5005 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 5006 fi = &fm_entry->fltr_info; 5007 5008 sbuf_printf(sbuf, 5009 "\nethertype = 0x%04x, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 5010 fi->l_data.ethertype_mac.ethertype, 5011 fi->vsi_handle, ice_fltr_flag_str(fi->flag), 5012 fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), 5013 fi->fltr_rule_id); 5014 5015 /* if we have a vsi_list_info, print some information about that */ 5016 if (fm_entry->vsi_list_info) { 5017 sbuf_printf(sbuf, 5018 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 5019 fm_entry->vsi_count, 5020 fm_entry->vsi_list_info->vsi_list_id, 5021 fm_entry->vsi_list_info->ref_cnt); 5022 } 5023 } 5024 5025 ice_release_lock(rule_lock); 5026 5027 sbuf_finish(sbuf); 5028 sbuf_delete(sbuf); 5029 5030 return (0); 5031 } 5032 5033 /** 5034 * ice_sysctl_dump_ethertype_mac_filters - Dump a list of all HW Ethertype/MAC filters 5035 * @oidp: sysctl oid structure 5036 * @arg1: pointer to private data structure 5037 * @arg2: unused 5038 * @req: sysctl request pointer 5039 * 5040 * Callback for "ethertype_mac_filters" sysctl to dump the programmed 5041 * Ethertype/MAC filters. 
5042 */ 5043 static int 5044 ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS) 5045 { 5046 struct ice_softc *sc = (struct ice_softc *)arg1; 5047 struct ice_hw *hw = &sc->hw; 5048 struct ice_switch_info *sw = hw->switch_info; 5049 struct ice_fltr_mgmt_list_entry *fm_entry; 5050 struct ice_list_head *rule_head; 5051 struct ice_lock *rule_lock; 5052 struct ice_fltr_info *fi; 5053 struct sbuf *sbuf; 5054 int ret; 5055 5056 UNREFERENCED_PARAMETER(oidp); 5057 UNREFERENCED_PARAMETER(arg2); 5058 5059 if (ice_driver_is_detaching(sc)) 5060 return (ESHUTDOWN); 5061 5062 /* Wire the old buffer so we can take a non-sleepable lock */ 5063 ret = sysctl_wire_old_buffer(req, 0); 5064 if (ret) 5065 return (ret); 5066 5067 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5068 5069 rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rule_lock; 5070 rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rules; 5071 5072 sbuf_printf(sbuf, "Ethertype/MAC Filter List"); 5073 5074 ice_acquire_lock(rule_lock); 5075 5076 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { 5077 fi = &fm_entry->fltr_info; 5078 5079 sbuf_printf(sbuf, 5080 "\nethertype = 0x%04x, mac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", 5081 fi->l_data.ethertype_mac.ethertype, 5082 fi->l_data.ethertype_mac.mac_addr, ":", 5083 fi->vsi_handle, ice_fltr_flag_str(fi->flag), 5084 fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), 5085 fi->fltr_rule_id); 5086 5087 /* if we have a vsi_list_info, print some information about that */ 5088 if (fm_entry->vsi_list_info) { 5089 sbuf_printf(sbuf, 5090 ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", 5091 fm_entry->vsi_count, 5092 fm_entry->vsi_list_info->vsi_list_id, 5093 fm_entry->vsi_list_info->ref_cnt); 5094 } 5095 } 5096 5097 ice_release_lock(rule_lock); 5098 5099 sbuf_finish(sbuf); 5100 sbuf_delete(sbuf); 5101 5102 return (0); 5103 } 5104 5105 /** 5106 * ice_sysctl_dump_state_flags - Dump device driver state flags 5107 * @oidp: sysctl oid structure 5108 * @arg1: pointer to private data structure 5109 * @arg2: unused 5110 * @req: sysctl request pointer 5111 * 5112 * Callback for "state" sysctl to display currently set driver state flags. 
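 *
 * This handler is attached as the "state" sysctl under the debug node, so
 * the flags can be dumped with, e.g. (unit number assumed):
 *
 *      # sysctl dev.ice.0.debug.state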
5113 */ 5114 static int 5115 ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS) 5116 { 5117 struct ice_softc *sc = (struct ice_softc *)arg1; 5118 struct sbuf *sbuf; 5119 u32 copied_state; 5120 unsigned int i; 5121 bool at_least_one = false; 5122 5123 UNREFERENCED_PARAMETER(oidp); 5124 UNREFERENCED_PARAMETER(arg2); 5125 5126 if (ice_driver_is_detaching(sc)) 5127 return (ESHUTDOWN); 5128 5129 /* Make a copy of the state to ensure we display coherent values */ 5130 copied_state = atomic_load_acq_32(&sc->state); 5131 5132 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5133 5134 /* Add the string for each set state to the sbuf */ 5135 for (i = 0; i < 32; i++) { 5136 if (copied_state & BIT(i)) { 5137 const char *str = ice_state_to_str((enum ice_state)i); 5138 5139 at_least_one = true; 5140 5141 if (str) 5142 sbuf_printf(sbuf, "\n%s", str); 5143 else 5144 sbuf_printf(sbuf, "\nBIT(%u)", i); 5145 } 5146 } 5147 5148 if (!at_least_one) 5149 sbuf_printf(sbuf, "Nothing set"); 5150 5151 sbuf_finish(sbuf); 5152 sbuf_delete(sbuf); 5153 5154 return (0); 5155 } 5156 5157 /** 5158 * ice_add_debug_tunables - Add tunables helpful for debugging the device driver 5159 * @sc: device private structure 5160 * 5161 * Add sysctl tunable values related to debugging the device driver. For now, 5162 * this means a tunable to set the debug mask early during driver load. 5163 * 5164 * The debug node will be marked CTLFLAG_SKIP unless INVARIANTS is defined, so 5165 * that in normal kernel builds, these will all be hidden, but on a debug 5166 * kernel they will be more easily visible. 5167 */ 5168 static void 5169 ice_add_debug_tunables(struct ice_softc *sc) 5170 { 5171 struct sysctl_oid_list *debug_list; 5172 device_t dev = sc->dev; 5173 5174 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 5175 struct sysctl_oid_list *ctx_list = 5176 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 5177 5178 sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", 5179 ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 5180 NULL, "Debug Sysctls"); 5181 debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); 5182 5183 SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "debug_mask", 5184 CTLFLAG_RW | CTLFLAG_TUN, &sc->hw.debug_mask, 0, 5185 "Debug message enable/disable mask"); 5186 5187 /* Load the default value from the global sysctl first */ 5188 sc->enable_tx_fc_filter = ice_enable_tx_fc_filter; 5189 5190 SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_fc_filter", 5191 CTLFLAG_RDTUN, &sc->enable_tx_fc_filter, 0, 5192 "Drop Ethertype 0x8808 control frames originating from software on this PF"); 5193 5194 /* Load the default value from the global sysctl first */ 5195 sc->enable_tx_lldp_filter = ice_enable_tx_lldp_filter; 5196 5197 SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_lldp_filter", 5198 CTLFLAG_RDTUN, &sc->enable_tx_lldp_filter, 0, 5199 "Drop Ethertype 0x88cc LLDP frames originating from software on this PF"); 5200 5201 } 5202 5203 #define ICE_SYSCTL_HELP_REQUEST_RESET \ 5204 "\nRequest the driver to initiate a reset." 
\ 5205 "\n\tpfr - Initiate a PF reset" \ 5206 "\n\tcorer - Initiate a CORE reset" \ 5207 "\n\tglobr - Initiate a GLOBAL reset" 5208 5209 /** 5210 * @var rl_sysctl_ticks 5211 * @brief timestamp for latest reset request sysctl call 5212 * 5213 * Helps rate-limit the call to the sysctl which resets the device 5214 */ 5215 int rl_sysctl_ticks = 0; 5216 5217 /** 5218 * ice_sysctl_request_reset - Request that the driver initiate a reset 5219 * @oidp: sysctl oid structure 5220 * @arg1: pointer to private data structure 5221 * @arg2: unused 5222 * @req: sysctl request pointer 5223 * 5224 * Callback for "request_reset" sysctl to request that the driver initiate 5225 * a reset. Expects to be passed one of the following strings 5226 * 5227 * "pfr" - Initiate a PF reset 5228 * "corer" - Initiate a CORE reset 5229 * "globr" - Initiate a Global reset 5230 */ 5231 static int 5232 ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS) 5233 { 5234 struct ice_softc *sc = (struct ice_softc *)arg1; 5235 struct ice_hw *hw = &sc->hw; 5236 enum ice_status status; 5237 enum ice_reset_req reset_type = ICE_RESET_INVAL; 5238 const char *reset_message; 5239 int error = 0; 5240 5241 /* Buffer to store the requested reset string. Must contain enough 5242 * space to store the largest expected reset string, which currently 5243 * means 6 bytes of space. 5244 */ 5245 char reset[6] = ""; 5246 5247 UNREFERENCED_PARAMETER(arg2); 5248 5249 error = priv_check(curthread, PRIV_DRIVER); 5250 if (error) 5251 return (error); 5252 5253 if (ice_driver_is_detaching(sc)) 5254 return (ESHUTDOWN); 5255 5256 /* Read in the requested reset type. */ 5257 error = sysctl_handle_string(oidp, reset, sizeof(reset), req); 5258 if ((error) || (req->newptr == NULL)) 5259 return (error); 5260 5261 if (strcmp(reset, "pfr") == 0) { 5262 reset_message = "Requesting a PF reset"; 5263 reset_type = ICE_RESET_PFR; 5264 } else if (strcmp(reset, "corer") == 0) { 5265 reset_message = "Initiating a CORE reset"; 5266 reset_type = ICE_RESET_CORER; 5267 } else if (strcmp(reset, "globr") == 0) { 5268 reset_message = "Initiating a GLOBAL reset"; 5269 reset_type = ICE_RESET_GLOBR; 5270 } else if (strcmp(reset, "empr") == 0) { 5271 device_printf(sc->dev, "Triggering an EMP reset via software is not currently supported\n"); 5272 return (EOPNOTSUPP); 5273 } 5274 5275 if (reset_type == ICE_RESET_INVAL) { 5276 device_printf(sc->dev, "%s is not a valid reset request\n", reset); 5277 return (EINVAL); 5278 } 5279 5280 /* 5281 * Rate-limit the frequency at which this function is called. 5282 * Assuming this is called successfully once, typically, 5283 * everything should be handled within the allotted time frame. 5284 * However, in the odd setup situations, we've also put in 5285 * guards for when the reset has finished, but we're in the 5286 * process of rebuilding. And instead of queueing an intent, 5287 * simply error out and let the caller retry, if so desired. 5288 */ 5289 if (TICKS_2_MSEC(ticks - rl_sysctl_ticks) < 500) { 5290 device_printf(sc->dev, 5291 "Call frequency too high. Operation aborted.\n"); 5292 return (EBUSY); 5293 } 5294 rl_sysctl_ticks = ticks; 5295 5296 if (TICKS_2_MSEC(ticks - sc->rebuild_ticks) < 100) { 5297 device_printf(sc->dev, "Device rebuilding. Operation aborted.\n"); 5298 return (EBUSY); 5299 } 5300 5301 if (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) { 5302 device_printf(sc->dev, "Device in reset. 
Operation aborted.\n"); 5303 return (EBUSY); 5304 } 5305 5306 device_printf(sc->dev, "%s\n", reset_message); 5307 5308 /* Initiate the PF reset during the admin status task */ 5309 if (reset_type == ICE_RESET_PFR) { 5310 ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ); 5311 return (0); 5312 } 5313 5314 /* 5315 * Other types of resets including CORE and GLOBAL resets trigger an 5316 * interrupt on all PFs. Initiate the reset now. Preparation and 5317 * rebuild logic will be handled by the admin status task. 5318 */ 5319 status = ice_reset(hw, reset_type); 5320 5321 /* 5322 * Resets can take a long time and we still don't want another call 5323 * to this function before we settle down. 5324 */ 5325 rl_sysctl_ticks = ticks; 5326 5327 if (status) { 5328 device_printf(sc->dev, "failed to initiate device reset, err %s\n", 5329 ice_status_str(status)); 5330 ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); 5331 return (EFAULT); 5332 } 5333 5334 return (0); 5335 } 5336 5337 /** 5338 * ice_add_debug_sysctls - Add sysctls helpful for debugging the device driver 5339 * @sc: device private structure 5340 * 5341 * Add sysctls related to debugging the device driver. Generally these should 5342 * simply be sysctls which dump internal driver state, to aid in understanding 5343 * what the driver is doing. 5344 */ 5345 static void 5346 ice_add_debug_sysctls(struct ice_softc *sc) 5347 { 5348 struct sysctl_oid *sw_node; 5349 struct sysctl_oid_list *debug_list, *sw_list; 5350 device_t dev = sc->dev; 5351 5352 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 5353 5354 debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); 5355 5356 SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "request_reset", 5357 CTLTYPE_STRING | CTLFLAG_WR, sc, 0, 5358 ice_sysctl_request_reset, "A", 5359 ICE_SYSCTL_HELP_REQUEST_RESET); 5360 5361 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "pfr_count", CTLFLAG_RD, 5362 &sc->soft_stats.pfr_count, 0, "# of PF resets handled"); 5363 5364 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "corer_count", CTLFLAG_RD, 5365 &sc->soft_stats.corer_count, 0, "# of CORE resets handled"); 5366 5367 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "globr_count", CTLFLAG_RD, 5368 &sc->soft_stats.globr_count, 0, "# of Global resets handled"); 5369 5370 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "empr_count", CTLFLAG_RD, 5371 &sc->soft_stats.empr_count, 0, "# of EMP resets handled"); 5372 5373 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "tx_mdd_count", CTLFLAG_RD, 5374 &sc->soft_stats.tx_mdd_count, 0, "# of Tx MDD events detected"); 5375 5376 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "rx_mdd_count", CTLFLAG_RD, 5377 &sc->soft_stats.rx_mdd_count, 0, "# of Rx MDD events detected"); 5378 5379 SYSCTL_ADD_PROC(ctx, debug_list, 5380 OID_AUTO, "state", CTLTYPE_STRING | CTLFLAG_RD, 5381 sc, 0, ice_sysctl_dump_state_flags, "A", "Driver State Flags"); 5382 5383 SYSCTL_ADD_PROC(ctx, debug_list, 5384 OID_AUTO, "phy_type_low", CTLTYPE_U64 | CTLFLAG_RW, 5385 sc, 0, ice_sysctl_phy_type_low, "QU", 5386 "PHY type Low from Get PHY Caps/Set PHY Cfg"); 5387 5388 SYSCTL_ADD_PROC(ctx, debug_list, 5389 OID_AUTO, "phy_type_high", CTLTYPE_U64 | CTLFLAG_RW, 5390 sc, 0, ice_sysctl_phy_type_high, "QU", 5391 "PHY type High from Get PHY Caps/Set PHY Cfg"); 5392 5393 SYSCTL_ADD_PROC(ctx, debug_list, 5394 OID_AUTO, "phy_sw_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5395 sc, 0, ice_sysctl_phy_sw_caps, "", 5396 "Get PHY Capabilities (Software configuration)"); 5397 5398 SYSCTL_ADD_PROC(ctx, debug_list, 5399 OID_AUTO, "phy_nvm_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5400 
sc, 0, ice_sysctl_phy_nvm_caps, "", 5401 "Get PHY Capabilities (NVM configuration)"); 5402 5403 SYSCTL_ADD_PROC(ctx, debug_list, 5404 OID_AUTO, "phy_topo_caps", CTLTYPE_STRUCT | CTLFLAG_RD, 5405 sc, 0, ice_sysctl_phy_topo_caps, "", 5406 "Get PHY Capabilities (Topology configuration)"); 5407 5408 SYSCTL_ADD_PROC(ctx, debug_list, 5409 OID_AUTO, "phy_link_status", CTLTYPE_STRUCT | CTLFLAG_RD, 5410 sc, 0, ice_sysctl_phy_link_status, "", 5411 "Get PHY Link Status"); 5412 5413 SYSCTL_ADD_PROC(ctx, debug_list, 5414 OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD, 5415 sc, 0, ice_sysctl_read_i2c_diag_data, "A", 5416 "Dump selected diagnostic data from FW"); 5417 5418 SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "fw_build", CTLFLAG_RD, 5419 &sc->hw.fw_build, 0, "FW Build ID"); 5420 5421 SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "os_ddp_version", CTLTYPE_STRING | CTLFLAG_RD, 5422 sc, 0, ice_sysctl_os_pkg_version, "A", 5423 "DDP package name and version found in ice_ddp"); 5424 5425 SYSCTL_ADD_PROC(ctx, debug_list, 5426 OID_AUTO, "cur_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, 5427 sc, 0, ice_sysctl_fw_cur_lldp_persist_status, "A", "Current LLDP persistent status"); 5428 5429 SYSCTL_ADD_PROC(ctx, debug_list, 5430 OID_AUTO, "dflt_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, 5431 sc, 0, ice_sysctl_fw_dflt_lldp_persist_status, "A", "Default LLDP persistent status"); 5432 5433 SYSCTL_ADD_PROC(ctx, debug_list, 5434 OID_AUTO, "negotiated_fc", CTLTYPE_STRING | CTLFLAG_RD, 5435 sc, 0, ice_sysctl_negotiated_fc, "A", "Current Negotiated Flow Control mode"); 5436 5437 sw_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "switch", 5438 CTLFLAG_RD, NULL, "Switch Configuration"); 5439 sw_list = SYSCTL_CHILDREN(sw_node); 5440 5441 SYSCTL_ADD_PROC(ctx, sw_list, 5442 OID_AUTO, "mac_filters", CTLTYPE_STRING | CTLFLAG_RD, 5443 sc, 0, ice_sysctl_dump_mac_filters, "A", "MAC Filters"); 5444 5445 SYSCTL_ADD_PROC(ctx, sw_list, 5446 OID_AUTO, "vlan_filters", CTLTYPE_STRING | CTLFLAG_RD, 5447 sc, 0, ice_sysctl_dump_vlan_filters, "A", "VLAN Filters"); 5448 5449 SYSCTL_ADD_PROC(ctx, sw_list, 5450 OID_AUTO, "ethertype_filters", CTLTYPE_STRING | CTLFLAG_RD, 5451 sc, 0, ice_sysctl_dump_ethertype_filters, "A", "Ethertype Filters"); 5452 5453 SYSCTL_ADD_PROC(ctx, sw_list, 5454 OID_AUTO, "ethertype_mac_filters", CTLTYPE_STRING | CTLFLAG_RD, 5455 sc, 0, ice_sysctl_dump_ethertype_mac_filters, "A", "Ethertype/MAC Filters"); 5456 5457 } 5458 5459 /** 5460 * ice_vsi_disable_tx - Disable (unconfigure) Tx queues for a VSI 5461 * @vsi: the VSI to disable 5462 * 5463 * Disables the Tx queues associated with this VSI. Essentially the opposite 5464 * of ice_cfg_vsi_for_tx. 
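* The teardown itself is issued through ice_dis_vsi_txq(); an ICE_ERR_DOES_NOT_EXIST * or ICE_ERR_RESET_ONGOING return is tolerated below, since it simply means the * queues were already disabled (for example by a reset that is in progress).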
5465 */ 5466 int 5467 ice_vsi_disable_tx(struct ice_vsi *vsi) 5468 { 5469 struct ice_softc *sc = vsi->sc; 5470 struct ice_hw *hw = &sc->hw; 5471 enum ice_status status; 5472 u32 *q_teids; 5473 u16 *q_ids, *q_handles; 5474 int i, err = 0; 5475 5476 if (vsi->num_tx_queues > 255) 5477 return (ENOSYS); 5478 5479 q_teids = (u32 *)malloc(sizeof(*q_teids) * vsi->num_tx_queues, 5480 M_ICE, M_NOWAIT|M_ZERO); 5481 if (!q_teids) 5482 return (ENOMEM); 5483 5484 q_ids = (u16 *)malloc(sizeof(*q_ids) * vsi->num_tx_queues, 5485 M_ICE, M_NOWAIT|M_ZERO); 5486 if (!q_ids) { 5487 err = (ENOMEM); 5488 goto free_q_teids; 5489 } 5490 5491 q_handles = (u16 *)malloc(sizeof(*q_handles) * vsi->num_tx_queues, 5492 M_ICE, M_NOWAIT|M_ZERO); 5493 if (!q_handles) { 5494 err = (ENOMEM); 5495 goto free_q_ids; 5496 } 5497 5498 5499 for (i = 0; i < vsi->num_tx_queues; i++) { 5500 struct ice_tx_queue *txq = &vsi->tx_queues[i]; 5501 5502 q_ids[i] = vsi->tx_qmap[i]; 5503 q_handles[i] = i; 5504 q_teids[i] = txq->q_teid; 5505 } 5506 5507 status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, vsi->num_tx_queues, 5508 q_handles, q_ids, q_teids, ICE_NO_RESET, 0, NULL); 5509 if (status == ICE_ERR_DOES_NOT_EXIST) { 5510 ; /* Queues have already been disabled, no need to report this as an error */ 5511 } else if (status == ICE_ERR_RESET_ONGOING) { 5512 device_printf(sc->dev, 5513 "Reset in progress. LAN Tx queues already disabled\n"); 5514 } else if (status) { 5515 device_printf(sc->dev, 5516 "Failed to disable LAN Tx queues: err %s aq_err %s\n", 5517 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5518 err = (ENODEV); 5519 } 5520 5521 /* free_q_handles: */ 5522 free(q_handles, M_ICE); 5523 free_q_ids: 5524 free(q_ids, M_ICE); 5525 free_q_teids: 5526 free(q_teids, M_ICE); 5527 5528 return err; 5529 } 5530 5531 /** 5532 * ice_vsi_set_rss_params - Set the RSS parameters for the VSI 5533 * @vsi: the VSI to configure 5534 * 5535 * Sets the RSS table size and lookup table type for the VSI based on its 5536 * VSI type. 5537 */ 5538 static void 5539 ice_vsi_set_rss_params(struct ice_vsi *vsi) 5540 { 5541 struct ice_softc *sc = vsi->sc; 5542 struct ice_hw_common_caps *cap; 5543 5544 cap = &sc->hw.func_caps.common_cap; 5545 5546 switch (vsi->type) { 5547 case ICE_VSI_PF: 5548 /* The PF VSI inherits RSS instance of the PF */ 5549 vsi->rss_table_size = cap->rss_table_size; 5550 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; 5551 break; 5552 case ICE_VSI_VF: 5553 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; 5554 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; 5555 break; 5556 default: 5557 device_printf(sc->dev, 5558 "VSI %d: RSS not supported for VSI type %d\n", 5559 vsi->idx, vsi->type); 5560 break; 5561 } 5562 } 5563 5564 /** 5565 * ice_vsi_add_txqs_ctx - Create a sysctl context and node to store txq sysctls 5566 * @vsi: The VSI to add the context for 5567 * 5568 * Creates a sysctl context for storing txq sysctls. Additionally creates 5569 * a node rooted at the given VSI's main sysctl node. This context will be 5570 * used to store per-txq sysctls which may need to be released during the 5571 * driver's lifetime. 
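* The matching teardown helper is ice_vsi_del_txqs_ctx(), which must run before * the Tx queue memory itself is freed.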
5572 */ 5573 void 5574 ice_vsi_add_txqs_ctx(struct ice_vsi *vsi) 5575 { 5576 struct sysctl_oid_list *vsi_list; 5577 5578 sysctl_ctx_init(&vsi->txqs_ctx); 5579 5580 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 5581 5582 vsi->txqs_node = SYSCTL_ADD_NODE(&vsi->txqs_ctx, vsi_list, OID_AUTO, "txqs", 5583 CTLFLAG_RD, NULL, "Tx Queues"); 5584 } 5585 5586 /** 5587 * ice_vsi_add_rxqs_ctx - Create a sysctl context and node to store rxq sysctls 5588 * @vsi: The VSI to add the context for 5589 * 5590 * Creates a sysctl context for storing rxq sysctls. Additionally creates 5591 * a node rooted at the given VSI's main sysctl node. This context will be 5592 * used to store per-rxq sysctls which may need to be released during the 5593 * driver's lifetime. 5594 */ 5595 void 5596 ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi) 5597 { 5598 struct sysctl_oid_list *vsi_list; 5599 5600 sysctl_ctx_init(&vsi->rxqs_ctx); 5601 5602 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); 5603 5604 vsi->rxqs_node = SYSCTL_ADD_NODE(&vsi->rxqs_ctx, vsi_list, OID_AUTO, "rxqs", 5605 CTLFLAG_RD, NULL, "Rx Queues"); 5606 } 5607 5608 /** 5609 * ice_vsi_del_txqs_ctx - Delete the Tx queue sysctl context for this VSI 5610 * @vsi: The VSI to delete from 5611 * 5612 * Frees the txq sysctl context created for storing the per-queue Tx sysctls. 5613 * Must be called prior to freeing the Tx queue memory, in order to avoid 5614 * having sysctls point at stale memory. 5615 */ 5616 void 5617 ice_vsi_del_txqs_ctx(struct ice_vsi *vsi) 5618 { 5619 device_t dev = vsi->sc->dev; 5620 int err; 5621 5622 if (vsi->txqs_node) { 5623 err = sysctl_ctx_free(&vsi->txqs_ctx); 5624 if (err) 5625 device_printf(dev, "failed to free VSI %d txqs_ctx, err %s\n", 5626 vsi->idx, ice_err_str(err)); 5627 vsi->txqs_node = NULL; 5628 } 5629 } 5630 5631 /** 5632 * ice_vsi_del_rxqs_ctx - Delete the Rx queue sysctl context for this VSI 5633 * @vsi: The VSI to delete from 5634 * 5635 * Frees the rxq sysctl context created for storing the per-queue Rx sysctls. 5636 * Must be called prior to freeing the Rx queue memory, in order to avoid 5637 * having sysctls point at stale memory. 5638 */ 5639 void 5640 ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi) 5641 { 5642 device_t dev = vsi->sc->dev; 5643 int err; 5644 5645 if (vsi->rxqs_node) { 5646 err = sysctl_ctx_free(&vsi->rxqs_ctx); 5647 if (err) 5648 device_printf(dev, "failed to free VSI %d rxqs_ctx, err %s\n", 5649 vsi->idx, ice_err_str(err)); 5650 vsi->rxqs_node = NULL; 5651 } 5652 } 5653 5654 /** 5655 * ice_add_txq_sysctls - Add per-queue sysctls for a Tx queue 5656 * @txq: pointer to the Tx queue 5657 * 5658 * Add per-queue sysctls for a given Tx queue. Can't be called during 5659 * ice_add_vsi_sysctls, since the queue memory has not yet been setup. 
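* As a rough illustration (the exact names here are an assumption): with a device * sysctl tree rooted at dev.ice.0 and a VSI node named "0", the statistics added * here would appear as, e.g., dev.ice.0.0.txqs.2.tx_packets for Tx queue 2; the * actual VSI node name depends on how the VSI's sysctl node was created.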
5660 */ 5661 void 5662 ice_add_txq_sysctls(struct ice_tx_queue *txq) 5663 { 5664 struct ice_vsi *vsi = txq->vsi; 5665 struct sysctl_ctx_list *ctx = &vsi->txqs_ctx; 5666 struct sysctl_oid_list *txqs_list, *this_txq_list; 5667 struct sysctl_oid *txq_node; 5668 char txq_name[32], txq_desc[32]; 5669 5670 const struct ice_sysctl_info ctls[] = { 5671 { &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" }, 5672 { &txq->stats.tx_bytes, "tx_bytes", "Queue Bytes Transmitted" }, 5673 { &txq->stats.mss_too_small, "mss_too_small", "TSO sends with an MSS less than 64" }, 5674 { 0, 0, 0 } 5675 }; 5676 5677 const struct ice_sysctl_info *entry = ctls; 5678 5679 txqs_list = SYSCTL_CHILDREN(vsi->txqs_node); 5680 5681 snprintf(txq_name, sizeof(txq_name), "%u", txq->me); 5682 snprintf(txq_desc, sizeof(txq_desc), "Tx Queue %u", txq->me); 5683 txq_node = SYSCTL_ADD_NODE(ctx, txqs_list, OID_AUTO, txq_name, 5684 CTLFLAG_RD, NULL, txq_desc); 5685 this_txq_list = SYSCTL_CHILDREN(txq_node); 5686 5687 /* Add the Tx queue statistics */ 5688 while (entry->stat != 0) { 5689 SYSCTL_ADD_U64(ctx, this_txq_list, OID_AUTO, entry->name, 5690 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 5691 entry->description); 5692 entry++; 5693 } 5694 } 5695 5696 /** 5697 * ice_add_rxq_sysctls - Add per-queue sysctls for an Rx queue 5698 * @rxq: pointer to the Rx queue 5699 * 5700 * Add per-queue sysctls for a given Rx queue. Can't be called during 5701 * ice_add_vsi_sysctls, since the queue memory has not yet been setup. 5702 */ 5703 void 5704 ice_add_rxq_sysctls(struct ice_rx_queue *rxq) 5705 { 5706 struct ice_vsi *vsi = rxq->vsi; 5707 struct sysctl_ctx_list *ctx = &vsi->rxqs_ctx; 5708 struct sysctl_oid_list *rxqs_list, *this_rxq_list; 5709 struct sysctl_oid *rxq_node; 5710 char rxq_name[32], rxq_desc[32]; 5711 5712 const struct ice_sysctl_info ctls[] = { 5713 { &rxq->stats.rx_packets, "rx_packets", "Queue Packets Received" }, 5714 { &rxq->stats.rx_bytes, "rx_bytes", "Queue Bytes Received" }, 5715 { &rxq->stats.desc_errs, "rx_desc_errs", "Queue Rx Descriptor Errors" }, 5716 { 0, 0, 0 } 5717 }; 5718 5719 const struct ice_sysctl_info *entry = ctls; 5720 5721 rxqs_list = SYSCTL_CHILDREN(vsi->rxqs_node); 5722 5723 snprintf(rxq_name, sizeof(rxq_name), "%u", rxq->me); 5724 snprintf(rxq_desc, sizeof(rxq_desc), "Rx Queue %u", rxq->me); 5725 rxq_node = SYSCTL_ADD_NODE(ctx, rxqs_list, OID_AUTO, rxq_name, 5726 CTLFLAG_RD, NULL, rxq_desc); 5727 this_rxq_list = SYSCTL_CHILDREN(rxq_node); 5728 5729 /* Add the Rx queue statistics */ 5730 while (entry->stat != 0) { 5731 SYSCTL_ADD_U64(ctx, this_rxq_list, OID_AUTO, entry->name, 5732 CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, 5733 entry->description); 5734 entry++; 5735 } 5736 } 5737 5738 /** 5739 * ice_get_default_rss_key - Obtain a default RSS key 5740 * @seed: storage for the RSS key data 5741 * 5742 * Copies a pre-generated RSS key into the seed memory. The seed pointer must 5743 * point to a block of memory that is at least 40 bytes in size. 5744 * 5745 * The key isn't randomly generated each time this function is called because 5746 * that makes the RSS key change every time we reconfigure RSS. This does mean 5747 * that we're hard coding a possibly 'well known' key. We might want to 5748 * investigate randomly generating this key once during the first call. 
5749 */ 5750 static void 5751 ice_get_default_rss_key(u8 *seed) 5752 { 5753 const u8 default_seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE] = { 5754 0x39, 0xed, 0xff, 0x4d, 0x43, 0x58, 0x42, 0xc3, 0x5f, 0xb8, 5755 0xa5, 0x32, 0x95, 0x65, 0x81, 0xcd, 0x36, 0x79, 0x71, 0x97, 5756 0xde, 0xa4, 0x41, 0x40, 0x6f, 0x27, 0xe9, 0x81, 0x13, 0xa0, 5757 0x95, 0x93, 0x5b, 0x1e, 0x9d, 0x27, 0x9d, 0x24, 0x84, 0xb5, 5758 }; 5759 5760 bcopy(default_seed, seed, ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); 5761 } 5762 5763 /** 5764 * ice_set_rss_key - Configure a given VSI with the default RSS key 5765 * @vsi: the VSI to configure 5766 * 5767 * Program the hardware RSS key. We use rss_getkey to grab the kernel RSS key. 5768 * If the kernel RSS interface is not available, this will fall back to our 5769 * pre-generated hash seed from ice_get_default_rss_key(). 5770 */ 5771 static int 5772 ice_set_rss_key(struct ice_vsi *vsi) 5773 { 5774 struct ice_aqc_get_set_rss_keys keydata = { .standard_rss_key = {0} }; 5775 struct ice_softc *sc = vsi->sc; 5776 struct ice_hw *hw = &sc->hw; 5777 enum ice_status status; 5778 5779 /* 5780 * If the RSS kernel interface is disabled, this will return the 5781 * default RSS key above. 5782 */ 5783 rss_getkey(keydata.standard_rss_key); 5784 5785 status = ice_aq_set_rss_key(hw, vsi->idx, &keydata); 5786 if (status) { 5787 device_printf(sc->dev, 5788 "ice_aq_set_rss_key status %s, error %s\n", 5789 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5790 return (EIO); 5791 } 5792 5793 return (0); 5794 } 5795 5796 /** 5797 * ice_set_rss_flow_flds - Program the RSS hash flows after package init 5798 * @vsi: the VSI to configure 5799 * 5800 * If the package file is initialized, the default RSS flows are reset. We 5801 * need to reprogram the expected hash configuration. We'll use 5802 * rss_gethashconfig() to determine which flows to enable. If RSS kernel 5803 * support is not enabled, this macro will fall back to suitable defaults. 
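* Roughly, rss_gethashconfig() returns a bitmask of RSS_HASHTYPE_RSS_* values and * each set bit is translated into an ice_add_rss_cfg() call with the matching hash * fields and header selectors; for example, RSS_HASHTYPE_RSS_TCP_IPV4 maps to * ICE_HASH_TCP_IPV4 over ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4.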
5804 */ 5805 static void 5806 ice_set_rss_flow_flds(struct ice_vsi *vsi) 5807 { 5808 struct ice_softc *sc = vsi->sc; 5809 struct ice_hw *hw = &sc->hw; 5810 device_t dev = sc->dev; 5811 enum ice_status status; 5812 u_int rss_hash_config; 5813 5814 rss_hash_config = rss_gethashconfig(); 5815 5816 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) { 5817 status = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, 5818 ICE_FLOW_SEG_HDR_IPV4); 5819 if (status) 5820 device_printf(dev, 5821 "ice_add_rss_cfg on VSI %d failed for ipv4 flow, err %s aq_err %s\n", 5822 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5823 } 5824 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) { 5825 status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, 5826 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4); 5827 if (status) 5828 device_printf(dev, 5829 "ice_add_rss_cfg on VSI %d failed for tcp4 flow, err %s aq_err %s\n", 5830 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5831 } 5832 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) { 5833 status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, 5834 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4); 5835 if (status) 5836 device_printf(dev, 5837 "ice_add_rss_cfg on VSI %d failed for udp4 flow, err %s aq_err %s\n", 5838 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5839 } 5840 if (rss_hash_config & (RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_IPV6_EX)) { 5841 status = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, 5842 ICE_FLOW_SEG_HDR_IPV6); 5843 if (status) 5844 device_printf(dev, 5845 "ice_add_rss_cfg on VSI %d failed for ipv6 flow, err %s aq_err %s\n", 5846 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5847 } 5848 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) { 5849 status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, 5850 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6); 5851 if (status) 5852 device_printf(dev, 5853 "ice_add_rss_cfg on VSI %d failed for tcp6 flow, err %s aq_err %s\n", 5854 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5855 } 5856 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) { 5857 status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, 5858 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6); 5859 if (status) 5860 device_printf(dev, 5861 "ice_add_rss_cfg on VSI %d failed for udp6 flow, err %s aq_err %s\n", 5862 vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5863 } 5864 5865 /* Warn about RSS hash types which are not supported */ 5866 /* coverity[dead_error_condition] */ 5867 if (rss_hash_config & ~ICE_DEFAULT_RSS_HASH_CONFIG) { 5868 device_printf(dev, 5869 "ice_add_rss_cfg on VSI %d could not configure every requested hash type\n", 5870 vsi->idx); 5871 } 5872 } 5873 5874 /** 5875 * ice_set_rss_lut - Program the RSS lookup table for a VSI 5876 * @vsi: the VSI to configure 5877 * 5878 * Programs the RSS lookup table for a given VSI. We use 5879 * rss_get_indirection_to_bucket which will use the indirection table provided 5880 * by the kernel RSS interface when available. If the kernel RSS interface is 5881 * not available, we will fall back to a simple round-robin fashion queue 5882 * assignment. 
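* For example, assuming rss_get_indirection_to_bucket(i) simply returns i when the * kernel RSS option is not enabled, a VSI with 4 Rx queues ends up with the * repeating pattern lut[i] = i % 4, i.e. 0, 1, 2, 3, 0, 1, 2, 3, ...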
5883 */ 5884 static int 5885 ice_set_rss_lut(struct ice_vsi *vsi) 5886 { 5887 struct ice_softc *sc = vsi->sc; 5888 struct ice_hw *hw = &sc->hw; 5889 device_t dev = sc->dev; 5890 enum ice_status status; 5891 int i, err = 0; 5892 u8 *lut; 5893 5894 lut = (u8 *)malloc(vsi->rss_table_size, M_ICE, M_NOWAIT|M_ZERO); 5895 if (!lut) { 5896 device_printf(dev, "Failed to allocate RSS lut memory\n"); 5897 return (ENOMEM); 5898 } 5899 5900 /* Populate the LUT with max no. of queues. If the RSS kernel 5901 * interface is disabled, this will assign the lookup table in 5902 * a simple round robin fashion 5903 */ 5904 for (i = 0; i < vsi->rss_table_size; i++) { 5905 /* XXX: this needs to be changed if num_rx_queues ever counts 5906 * more than just the RSS queues */ 5907 lut[i] = rss_get_indirection_to_bucket(i) % vsi->num_rx_queues; 5908 } 5909 5910 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, 5911 lut, vsi->rss_table_size); 5912 if (status) { 5913 device_printf(dev, 5914 "Cannot set RSS lut, err %s aq_err %s\n", 5915 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 5916 err = (EIO); 5917 } 5918 5919 free(lut, M_ICE); 5920 return err; 5921 } 5922 5923 /** 5924 * ice_config_rss - Configure RSS for a VSI 5925 * @vsi: the VSI to configure 5926 * 5927 * If FEATURE_RSS is enabled, configures the RSS lookup table and hash key for 5928 * a given VSI. 5929 */ 5930 int 5931 ice_config_rss(struct ice_vsi *vsi) 5932 { 5933 int err; 5934 5935 /* Nothing to do, if RSS is not enabled */ 5936 if (!ice_is_bit_set(vsi->sc->feat_en, ICE_FEATURE_RSS)) 5937 return 0; 5938 5939 err = ice_set_rss_key(vsi); 5940 if (err) 5941 return err; 5942 5943 ice_set_rss_flow_flds(vsi); 5944 5945 return ice_set_rss_lut(vsi); 5946 } 5947 5948 /** 5949 * ice_log_pkg_init - Log a message about status of DDP initialization 5950 * @sc: the device softc pointer 5951 * @pkg_status: the status result of ice_copy_and_init_pkg 5952 * 5953 * Called by ice_load_pkg after an attempt to download the DDP package 5954 * contents to the device. Determines whether the download was successful or 5955 * not and logs an appropriate message for the system administrator. 5956 * 5957 * @post if a DDP package was previously downloaded on another port and it 5958 * is not compatible with this driver, pkg_status will be updated to reflect 5959 * this, and the driver will transition to safe mode. 5960 */ 5961 void 5962 ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status) 5963 { 5964 struct ice_hw *hw = &sc->hw; 5965 device_t dev = sc->dev; 5966 struct sbuf *active_pkg, *os_pkg; 5967 5968 active_pkg = sbuf_new_auto(); 5969 ice_active_pkg_version_str(hw, active_pkg); 5970 sbuf_finish(active_pkg); 5971 5972 os_pkg = sbuf_new_auto(); 5973 ice_os_pkg_version_str(hw, os_pkg); 5974 sbuf_finish(os_pkg); 5975 5976 switch (*pkg_status) { 5977 case ICE_SUCCESS: 5978 /* The package download AdminQ command returned success because 5979 * this download succeeded or ICE_ERR_AQ_NO_WORK since there is 5980 * already a package loaded on the device. 
5981 */ 5982 if (hw->pkg_ver.major == hw->active_pkg_ver.major && 5983 hw->pkg_ver.minor == hw->active_pkg_ver.minor && 5984 hw->pkg_ver.update == hw->active_pkg_ver.update && 5985 hw->pkg_ver.draft == hw->active_pkg_ver.draft && 5986 !memcmp(hw->pkg_name, hw->active_pkg_name, 5987 sizeof(hw->pkg_name))) { 5988 switch (hw->pkg_dwnld_status) { 5989 case ICE_AQ_RC_OK: 5990 device_printf(dev, 5991 "The DDP package was successfully loaded: %s.\n", 5992 sbuf_data(active_pkg)); 5993 break; 5994 case ICE_AQ_RC_EEXIST: 5995 device_printf(dev, 5996 "DDP package already present on device: %s.\n", 5997 sbuf_data(active_pkg)); 5998 break; 5999 default: 6000 /* We do not expect this to occur, but the 6001 * extra messaging is here in case something 6002 * changes in the ice_init_pkg flow. 6003 */ 6004 device_printf(dev, 6005 "DDP package already present on device: %s. An unexpected error occurred, pkg_dwnld_status %s.\n", 6006 sbuf_data(active_pkg), 6007 ice_aq_str(hw->pkg_dwnld_status)); 6008 break; 6009 } 6010 } else if (pkg_ver_compatible(&hw->active_pkg_ver) == 0) { 6011 device_printf(dev, 6012 "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n", 6013 sbuf_data(active_pkg), 6014 sbuf_data(os_pkg)); 6015 } else if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { 6016 device_printf(dev, 6017 "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6018 sbuf_data(active_pkg), 6019 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6020 *pkg_status = ICE_ERR_NOT_SUPPORTED; 6021 } else { 6022 device_printf(dev, 6023 "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6024 sbuf_data(active_pkg), 6025 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6026 *pkg_status = ICE_ERR_NOT_SUPPORTED; 6027 } 6028 break; 6029 case ICE_ERR_NOT_SUPPORTED: 6030 /* 6031 * This assumes that the active_pkg_ver will not be 6032 * initialized if the ice_ddp package version is not 6033 * supported. 6034 */ 6035 if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { 6036 /* The ice_ddp version is not supported */ 6037 if (pkg_ver_compatible(&hw->pkg_ver) > 0) { 6038 device_printf(dev, 6039 "The DDP package in the ice_ddp module is higher than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated driver. Entering Safe Mode.\n", 6040 sbuf_data(os_pkg), 6041 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6042 } else if (pkg_ver_compatible(&hw->pkg_ver) < 0) { 6043 device_printf(dev, 6044 "The DDP package in the ice_ddp module is lower than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated ice_ddp module. Entering Safe Mode.\n", 6045 sbuf_data(os_pkg), 6046 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6047 } else { 6048 device_printf(dev, 6049 "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. 
Entering Safe Mode.\n", 6050 ice_status_str(*pkg_status), 6051 ice_aq_str(hw->pkg_dwnld_status), 6052 sbuf_data(os_pkg), 6053 sbuf_data(active_pkg), 6054 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6055 } 6056 } else { 6057 if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { 6058 device_printf(dev, 6059 "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6060 sbuf_data(active_pkg), 6061 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6062 } else if (pkg_ver_compatible(&hw->active_pkg_ver) < 0) { 6063 device_printf(dev, 6064 "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6065 sbuf_data(active_pkg), 6066 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6067 } else { 6068 device_printf(dev, 6069 "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", 6070 ice_status_str(*pkg_status), 6071 ice_aq_str(hw->pkg_dwnld_status), 6072 sbuf_data(os_pkg), 6073 sbuf_data(active_pkg), 6074 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); 6075 } 6076 } 6077 break; 6078 case ICE_ERR_CFG: 6079 case ICE_ERR_BUF_TOO_SHORT: 6080 case ICE_ERR_PARAM: 6081 device_printf(dev, 6082 "The DDP package in the ice_ddp module is invalid. Entering Safe Mode\n"); 6083 break; 6084 case ICE_ERR_FW_DDP_MISMATCH: 6085 device_printf(dev, 6086 "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); 6087 break; 6088 case ICE_ERR_AQ_ERROR: 6089 switch (hw->pkg_dwnld_status) { 6090 case ICE_AQ_RC_ENOSEC: 6091 case ICE_AQ_RC_EBADSIG: 6092 device_printf(dev, 6093 "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n"); 6094 goto free_sbufs; 6095 case ICE_AQ_RC_ESVN: 6096 device_printf(dev, 6097 "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n"); 6098 goto free_sbufs; 6099 case ICE_AQ_RC_EBADMAN: 6100 case ICE_AQ_RC_EBADBUF: 6101 device_printf(dev, 6102 "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n"); 6103 goto free_sbufs; 6104 default: 6105 break; 6106 } 6107 /* fall-through */ 6108 default: 6109 device_printf(dev, 6110 "An unknown error (%s aq_err %s) occurred when loading the DDP package. Entering Safe Mode.\n", 6111 ice_status_str(*pkg_status), 6112 ice_aq_str(hw->pkg_dwnld_status)); 6113 break; 6114 } 6115 6116 free_sbufs: 6117 sbuf_delete(active_pkg); 6118 sbuf_delete(os_pkg); 6119 } 6120 6121 /** 6122 * ice_load_pkg_file - Load the DDP package file using firmware_get 6123 * @sc: device private softc 6124 * 6125 * Use firmware_get to load the DDP package memory and then request that 6126 * firmware download the package contents and program the relevant hardware 6127 * bits. 6128 * 6129 * This function makes a copy of the DDP package memory which is tracked in 6130 * the ice_hw structure. The copy will be managed and released by 6131 * ice_deinit_hw(). This allows the firmware reference to be immediately 6132 * released using firmware_put. 
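* For example, to make the package available early in boot (as the message printed * below suggests), one would typically add the following line to /boot/loader.conf: * *	ice_ddp_load="YES"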
6133 */ 6134 void 6135 ice_load_pkg_file(struct ice_softc *sc) 6136 { 6137 struct ice_hw *hw = &sc->hw; 6138 device_t dev = sc->dev; 6139 enum ice_status status; 6140 const struct firmware *pkg; 6141 6142 pkg = firmware_get("ice_ddp"); 6143 if (!pkg) { 6144 device_printf(dev, "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n"); 6145 if (cold) 6146 device_printf(dev, 6147 "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n"); 6148 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); 6149 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); 6150 return; 6151 } 6152 6153 /* Copy and download the pkg contents */ 6154 status = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize); 6155 6156 /* Release the firmware reference */ 6157 firmware_put(pkg, FIRMWARE_UNLOAD); 6158 6159 /* Check the active DDP package version and log a message */ 6160 ice_log_pkg_init(sc, &status); 6161 6162 /* Place the driver into safe mode */ 6163 if (status != ICE_SUCCESS) { 6164 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); 6165 ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); 6166 } 6167 } 6168 6169 /** 6170 * ice_get_ifnet_counter - Retrieve counter value for a given ifnet counter 6171 * @vsi: the vsi to retrieve the value for 6172 * @counter: the counter type to retrieve 6173 * 6174 * Returns the value for a given ifnet counter. To do so, we calculate the 6175 * value based on the matching hardware statistics. 6176 */ 6177 uint64_t 6178 ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter) 6179 { 6180 struct ice_hw_port_stats *hs = &vsi->sc->stats.cur; 6181 struct ice_eth_stats *es = &vsi->hw_stats.cur; 6182 6183 /* For some statistics, especially those related to error flows, we do 6184 * not have per-VSI counters. In this case, we just report the global 6185 * counters. 6186 */ 6187 6188 switch (counter) { 6189 case IFCOUNTER_IPACKETS: 6190 return (es->rx_unicast + es->rx_multicast + es->rx_broadcast); 6191 case IFCOUNTER_IERRORS: 6192 return (hs->crc_errors + hs->illegal_bytes + 6193 hs->mac_local_faults + hs->mac_remote_faults + 6194 hs->rx_len_errors + hs->rx_undersize + 6195 hs->rx_oversize + hs->rx_fragments + hs->rx_jabber); 6196 case IFCOUNTER_OPACKETS: 6197 return (es->tx_unicast + es->tx_multicast + es->tx_broadcast); 6198 case IFCOUNTER_OERRORS: 6199 return (es->tx_errors); 6200 case IFCOUNTER_COLLISIONS: 6201 return (0); 6202 case IFCOUNTER_IBYTES: 6203 return (es->rx_bytes); 6204 case IFCOUNTER_OBYTES: 6205 return (es->tx_bytes); 6206 case IFCOUNTER_IMCASTS: 6207 return (es->rx_multicast); 6208 case IFCOUNTER_OMCASTS: 6209 return (es->tx_multicast); 6210 case IFCOUNTER_IQDROPS: 6211 return (es->rx_discards); 6212 case IFCOUNTER_OQDROPS: 6213 return (hs->tx_dropped_link_down); 6214 case IFCOUNTER_NOPROTO: 6215 return (es->rx_unknown_protocol); 6216 default: 6217 return if_get_counter_default(vsi->sc->ifp, counter); 6218 } 6219 } 6220 6221 /** 6222 * ice_save_pci_info - Save PCI configuration fields in HW struct 6223 * @hw: the ice_hw struct to save the PCI information in 6224 * @dev: the device to get the PCI information from 6225 * 6226 * This should only be called once, early in the device attach 6227 * process. 
6228 */ 6229 void 6230 ice_save_pci_info(struct ice_hw *hw, device_t dev) 6231 { 6232 hw->vendor_id = pci_get_vendor(dev); 6233 hw->device_id = pci_get_device(dev); 6234 hw->subsystem_vendor_id = pci_get_subvendor(dev); 6235 hw->subsystem_device_id = pci_get_subdevice(dev); 6236 hw->revision_id = pci_get_revid(dev); 6237 hw->bus.device = pci_get_slot(dev); 6238 hw->bus.func = pci_get_function(dev); 6239 } 6240 6241 /** 6242 * ice_replay_all_vsi_cfg - Replay configuration for all VSIs after reset 6243 * @sc: the device softc 6244 * 6245 * Replay the configuration for each VSI, and then cleanup replay 6246 * information. Called after a hardware reset in order to reconfigure the 6247 * active VSIs. 6248 */ 6249 int 6250 ice_replay_all_vsi_cfg(struct ice_softc *sc) 6251 { 6252 struct ice_hw *hw = &sc->hw; 6253 enum ice_status status; 6254 int i; 6255 6256 for (i = 0 ; i < sc->num_available_vsi; i++) { 6257 struct ice_vsi *vsi = sc->all_vsi[i]; 6258 6259 if (!vsi) 6260 continue; 6261 6262 status = ice_replay_vsi(hw, vsi->idx); 6263 if (status) { 6264 device_printf(sc->dev, "Failed to replay VSI %d, err %s aq_err %s\n", 6265 vsi->idx, ice_status_str(status), 6266 ice_aq_str(hw->adminq.sq_last_status)); 6267 return (EIO); 6268 } 6269 } 6270 6271 /* Cleanup replay filters after successful reconfiguration */ 6272 ice_replay_post(hw); 6273 return (0); 6274 } 6275 6276 /** 6277 * ice_clean_vsi_rss_cfg - Cleanup RSS configuration for a given VSI 6278 * @vsi: pointer to the VSI structure 6279 * 6280 * Cleanup the advanced RSS configuration for a given VSI. This is necessary 6281 * during driver removal to ensure that all RSS resources are properly 6282 * released. 6283 * 6284 * @remark this function doesn't report an error as it is expected to be 6285 * called during driver reset and unload, and there isn't much the driver can 6286 * do if freeing RSS resources fails. 6287 */ 6288 static void 6289 ice_clean_vsi_rss_cfg(struct ice_vsi *vsi) 6290 { 6291 struct ice_softc *sc = vsi->sc; 6292 struct ice_hw *hw = &sc->hw; 6293 device_t dev = sc->dev; 6294 enum ice_status status; 6295 6296 status = ice_rem_vsi_rss_cfg(hw, vsi->idx); 6297 if (status) 6298 device_printf(dev, 6299 "Failed to remove RSS configuration for VSI %d, err %s\n", 6300 vsi->idx, ice_status_str(status)); 6301 6302 /* Remove this VSI from the RSS list */ 6303 ice_rem_vsi_rss_list(hw, vsi->idx); 6304 } 6305 6306 /** 6307 * ice_clean_all_vsi_rss_cfg - Cleanup RSS configuration for all VSIs 6308 * @sc: the device softc pointer 6309 * 6310 * Cleanup the advanced RSS configuration for all VSIs on a given PF 6311 * interface. 6312 * 6313 * @remark This should be called while preparing for a reset, to cleanup stale 6314 * RSS configuration for all VSIs. 6315 */ 6316 void 6317 ice_clean_all_vsi_rss_cfg(struct ice_softc *sc) 6318 { 6319 int i; 6320 6321 /* No need to cleanup if RSS is not enabled */ 6322 if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) 6323 return; 6324 6325 for (i = 0; i < sc->num_available_vsi; i++) { 6326 struct ice_vsi *vsi = sc->all_vsi[i]; 6327 6328 if (vsi) 6329 ice_clean_vsi_rss_cfg(vsi); 6330 } 6331 } 6332 6333 /** 6334 * ice_requested_fec_mode - Return the requested FEC mode as a string 6335 * @pi: The port info structure 6336 * 6337 * Return a string representing the requested FEC mode.
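* Note that "requested" reflects the FEC options reported by Get PHY Capabilities * with ICE_AQC_REPORT_SW_CFG (i.e. the software configuration), while * ice_negotiated_fec_mode() below reports what the link is currently using, based * on the link status FEC bits.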
6338 */ 6339 static const char * 6340 ice_requested_fec_mode(struct ice_port_info *pi) 6341 { 6342 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 6343 enum ice_status status; 6344 6345 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, 6346 &pcaps, NULL); 6347 if (status) 6348 /* Just report unknown if we can't get capabilities */ 6349 return "Unknown"; 6350 6351 /* Check if RS-FEC has been requested first */ 6352 if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 6353 ICE_AQC_PHY_FEC_25G_RS_544_REQ)) 6354 return ice_fec_str(ICE_FEC_RS); 6355 6356 /* If RS FEC has not been requested, then check BASE-R */ 6357 if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 6358 ICE_AQC_PHY_FEC_25G_KR_REQ)) 6359 return ice_fec_str(ICE_FEC_BASER); 6360 6361 return ice_fec_str(ICE_FEC_NONE); 6362 } 6363 6364 /** 6365 * ice_negotiated_fec_mode - Return the negotiated FEC mode as a string 6366 * @pi: The port info structure 6367 * 6368 * Return a string representing the current FEC mode. 6369 */ 6370 static const char * 6371 ice_negotiated_fec_mode(struct ice_port_info *pi) 6372 { 6373 /* First, check whether RS FEC is currently in use */ 6374 if (pi->phy.link_info.fec_info & (ICE_AQ_LINK_25G_RS_528_FEC_EN | 6375 ICE_AQ_LINK_25G_RS_544_FEC_EN)) 6376 return ice_fec_str(ICE_FEC_RS); 6377 6378 /* If RS FEC is not in use, then check BASE-R */ 6379 if (pi->phy.link_info.fec_info & ICE_AQ_LINK_25G_KR_FEC_EN) 6380 return ice_fec_str(ICE_FEC_BASER); 6381 6382 return ice_fec_str(ICE_FEC_NONE); 6383 } 6384 6385 /** 6386 * ice_autoneg_mode - Return string indicating whether autoneg completed 6387 * @pi: The port info structure 6388 * 6389 * Return "True" if autonegotiation is completed, "False" otherwise. 6390 */ 6391 static const char * 6392 ice_autoneg_mode(struct ice_port_info *pi) 6393 { 6394 if (pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) 6395 return "True"; 6396 else 6397 return "False"; 6398 } 6399 6400 /** 6401 * ice_flowcontrol_mode - Return string indicating the Flow Control mode 6402 * @pi: The port info structure 6403 * 6404 * Returns the current Flow Control mode as a string. 6405 */ 6406 static const char * 6407 ice_flowcontrol_mode(struct ice_port_info *pi) 6408 { 6409 return ice_fc_str(pi->fc.current_mode); 6410 } 6411 6412 /** 6413 * ice_link_up_msg - Log a link up message with associated info 6414 * @sc: the device private softc 6415 * 6416 * Log a link up message with LOG_NOTICE message level. Include information 6417 * about the speed, duplex, FEC mode, autonegotiation and flow control. 6418 */ 6419 void 6420 ice_link_up_msg(struct ice_softc *sc) 6421 { 6422 struct ice_hw *hw = &sc->hw; 6423 struct ifnet *ifp = sc->ifp; 6424 const char *speed, *req_fec, *neg_fec, *autoneg, *flowcontrol; 6425 6426 speed = ice_aq_speed_to_str(hw->port_info); 6427 req_fec = ice_requested_fec_mode(hw->port_info); 6428 neg_fec = ice_negotiated_fec_mode(hw->port_info); 6429 autoneg = ice_autoneg_mode(hw->port_info); 6430 flowcontrol = ice_flowcontrol_mode(hw->port_info); 6431 6432 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", 6433 ifp->if_xname, speed, req_fec, neg_fec, autoneg, flowcontrol); 6434 } 6435 6436 /** 6437 * ice_update_laa_mac - Update MAC address if Locally Administered 6438 * @sc: the device softc 6439 * 6440 * Update the device MAC address when a Locally Administered Address is 6441 * assigned. 6442 * 6443 * This function does *not* update the MAC filter list itself.
Instead, it 6444 * should be called after ice_rm_pf_default_mac_filters, so that the previous 6445 * address filter will be removed, and before ice_cfg_pf_default_mac_filters, 6446 * so that the new address filter will be assigned. 6447 */ 6448 int 6449 ice_update_laa_mac(struct ice_softc *sc) 6450 { 6451 const u8 *lladdr = (const u8 *)IF_LLADDR(sc->ifp); 6452 struct ice_hw *hw = &sc->hw; 6453 enum ice_status status; 6454 6455 /* If the address is the same, then there is nothing to update */ 6456 if (!memcmp(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN)) 6457 return (0); 6458 6459 /* Reject Multicast addresses */ 6460 if (ETHER_IS_MULTICAST(lladdr)) 6461 return (EINVAL); 6462 6463 status = ice_aq_manage_mac_write(hw, lladdr, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL); 6464 if (status) { 6465 device_printf(sc->dev, "Failed to write mac %6D to firmware, err %s aq_err %s\n", 6466 lladdr, ":", ice_status_str(status), 6467 ice_aq_str(hw->adminq.sq_last_status)); 6468 return (EFAULT); 6469 } 6470 6471 /* Copy the address into place of the LAN address. */ 6472 bcopy(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN); 6473 6474 return (0); 6475 } 6476 6477 /** 6478 * ice_get_and_print_bus_info - Save (PCI) bus info and print messages 6479 * @sc: device softc 6480 * 6481 * This will potentially print out a warning message if bus bandwidth 6482 * is insufficient for full-speed operation. 6483 * 6484 * This should only be called once, during the attach process, after 6485 * hw->port_info has been filled out with port link topology information 6486 * (from the Get PHY Capabilities Admin Queue command). 6487 */ 6488 void 6489 ice_get_and_print_bus_info(struct ice_softc *sc) 6490 { 6491 struct ice_hw *hw = &sc->hw; 6492 device_t dev = sc->dev; 6493 u16 pci_link_status; 6494 int offset; 6495 6496 pci_find_cap(dev, PCIY_EXPRESS, &offset); 6497 pci_link_status = pci_read_config(dev, offset + PCIER_LINK_STA, 2); 6498 6499 /* Fill out hw struct with PCIE link status info */ 6500 ice_set_pci_link_status_data(hw, pci_link_status); 6501 6502 /* Use info to print out bandwidth messages */ 6503 ice_print_bus_link_data(dev, hw); 6504 6505 if (ice_pcie_bandwidth_check(sc)) { 6506 device_printf(dev, 6507 "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); 6508 device_printf(dev, 6509 "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 6510 } 6511 } 6512 6513 /** 6514 * ice_pcie_bus_speed_to_rate - Convert driver bus speed enum value to 6515 * a 64-bit baudrate. 6516 * @speed: enum value to convert 6517 * 6518 * This only goes up to PCIE Gen 4. 6519 */ 6520 static uint64_t 6521 ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed) 6522 { 6523 /* If the PCI-E speed is Gen1 or Gen2, then report 6524 * only 80% of bus speed to account for encoding overhead. 6525 */ 6526 switch (speed) { 6527 case ice_pcie_speed_2_5GT: 6528 return IF_Gbps(2); 6529 case ice_pcie_speed_5_0GT: 6530 return IF_Gbps(4); 6531 case ice_pcie_speed_8_0GT: 6532 return IF_Gbps(8); 6533 case ice_pcie_speed_16_0GT: 6534 return IF_Gbps(16); 6535 case ice_pcie_speed_unknown: 6536 default: 6537 return 0; 6538 } 6539 } 6540 6541 /** 6542 * ice_pcie_lnk_width_to_int - Convert driver pci-e width enum value to 6543 * a 32-bit number. 
6544 * @width: enum value to convert 6545 */ 6546 static int 6547 ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width) 6548 { 6549 switch (width) { 6550 case ice_pcie_lnk_x1: 6551 return (1); 6552 case ice_pcie_lnk_x2: 6553 return (2); 6554 case ice_pcie_lnk_x4: 6555 return (4); 6556 case ice_pcie_lnk_x8: 6557 return (8); 6558 case ice_pcie_lnk_x12: 6559 return (12); 6560 case ice_pcie_lnk_x16: 6561 return (16); 6562 case ice_pcie_lnk_x32: 6563 return (32); 6564 case ice_pcie_lnk_width_resrv: 6565 case ice_pcie_lnk_width_unknown: 6566 default: 6567 return (0); 6568 } 6569 } 6570 6571 /** 6572 * ice_pcie_bandwidth_check - Check if PCI-E bandwidth is sufficient for 6573 * full-speed device operation. 6574 * @sc: adapter softc 6575 * 6576 * Returns 0 if sufficient; 1 if not. 6577 */ 6578 static uint8_t 6579 ice_pcie_bandwidth_check(struct ice_softc *sc) 6580 { 6581 struct ice_hw *hw = &sc->hw; 6582 int num_ports, pcie_width; 6583 u64 pcie_speed, port_speed; 6584 6585 MPASS(hw->port_info); 6586 6587 num_ports = bitcount32(hw->func_caps.common_cap.valid_functions); 6588 port_speed = ice_phy_types_to_max_rate(hw->port_info); 6589 pcie_speed = ice_pcie_bus_speed_to_rate(hw->bus.speed); 6590 pcie_width = ice_pcie_lnk_width_to_int(hw->bus.width); 6591 6592 /* 6593 * If 2x100, clamp ports to 1 -- 2nd port is intended for 6594 * failover. 6595 */ 6596 if (port_speed == IF_Gbps(100)) 6597 num_ports = 1; 6598 6599 return !!((num_ports * port_speed) > pcie_speed * pcie_width); 6600 } 6601 6602 /** 6603 * ice_print_bus_link_data - Print PCI-E bandwidth information 6604 * @dev: device to print string for 6605 * @hw: hw struct with PCI-e link information 6606 */ 6607 static void 6608 ice_print_bus_link_data(device_t dev, struct ice_hw *hw) 6609 { 6610 device_printf(dev, "PCI Express Bus: Speed %s %s\n", 6611 ((hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" : 6612 (hw->bus.speed == ice_pcie_speed_8_0GT) ? "8.0GT/s" : 6613 (hw->bus.speed == ice_pcie_speed_5_0GT) ? "5.0GT/s" : 6614 (hw->bus.speed == ice_pcie_speed_2_5GT) ? "2.5GT/s" : "Unknown"), 6615 (hw->bus.width == ice_pcie_lnk_x32) ? "Width x32" : 6616 (hw->bus.width == ice_pcie_lnk_x16) ? "Width x16" : 6617 (hw->bus.width == ice_pcie_lnk_x12) ? "Width x12" : 6618 (hw->bus.width == ice_pcie_lnk_x8) ? "Width x8" : 6619 (hw->bus.width == ice_pcie_lnk_x4) ? "Width x4" : 6620 (hw->bus.width == ice_pcie_lnk_x2) ? "Width x2" : 6621 (hw->bus.width == ice_pcie_lnk_x1) ? 
"Width x1" : "Width Unknown"); 6622 } 6623 6624 /** 6625 * ice_set_pci_link_status_data - store PCI bus info 6626 * @hw: pointer to hardware structure 6627 * @link_status: the link status word from PCI config space 6628 * 6629 * Stores the PCI bus info (speed, width, type) within the ice_hw structure 6630 **/ 6631 static void 6632 ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status) 6633 { 6634 u16 reg; 6635 6636 hw->bus.type = ice_bus_pci_express; 6637 6638 reg = (link_status & PCIEM_LINK_STA_WIDTH) >> 4; 6639 6640 switch (reg) { 6641 case ice_pcie_lnk_x1: 6642 case ice_pcie_lnk_x2: 6643 case ice_pcie_lnk_x4: 6644 case ice_pcie_lnk_x8: 6645 case ice_pcie_lnk_x12: 6646 case ice_pcie_lnk_x16: 6647 case ice_pcie_lnk_x32: 6648 hw->bus.width = (enum ice_pcie_link_width)reg; 6649 break; 6650 default: 6651 hw->bus.width = ice_pcie_lnk_width_unknown; 6652 break; 6653 } 6654 6655 reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x14; 6656 6657 switch (reg) { 6658 case ice_pcie_speed_2_5GT: 6659 case ice_pcie_speed_5_0GT: 6660 case ice_pcie_speed_8_0GT: 6661 case ice_pcie_speed_16_0GT: 6662 hw->bus.speed = (enum ice_pcie_bus_speed)reg; 6663 break; 6664 default: 6665 hw->bus.speed = ice_pcie_speed_unknown; 6666 break; 6667 } 6668 } 6669 6670 /** 6671 * ice_init_link_events - Initialize Link Status Events mask 6672 * @sc: the device softc 6673 * 6674 * Initialize the Link Status Events mask to disable notification of link 6675 * events we don't care about in software. Also request that link status 6676 * events be enabled. 6677 */ 6678 int 6679 ice_init_link_events(struct ice_softc *sc) 6680 { 6681 struct ice_hw *hw = &sc->hw; 6682 enum ice_status status; 6683 u16 wanted_events; 6684 6685 /* Set the bits for the events that we want to be notified by */ 6686 wanted_events = (ICE_AQ_LINK_EVENT_UPDOWN | 6687 ICE_AQ_LINK_EVENT_MEDIA_NA | 6688 ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL); 6689 6690 /* request that every event except the wanted events be masked */ 6691 status = ice_aq_set_event_mask(hw, hw->port_info->lport, ~wanted_events, NULL); 6692 if (status) { 6693 device_printf(sc->dev, 6694 "Failed to set link status event mask, err %s aq_err %s\n", 6695 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6696 return (EIO); 6697 } 6698 6699 /* Request link info with the LSE bit set to enable link status events */ 6700 status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL); 6701 if (status) { 6702 device_printf(sc->dev, 6703 "Failed to enable link status events, err %s aq_err %s\n", 6704 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6705 return (EIO); 6706 } 6707 6708 return (0); 6709 } 6710 6711 /** 6712 * ice_handle_mdd_event - Handle possibly malicious events 6713 * @sc: the device softc 6714 * 6715 * Called by the admin task if an MDD detection interrupt is triggered. 6716 * Identifies possibly malicious events coming from VFs. Also triggers for 6717 * similar incorrect behavior from the PF as well. 
6718 */ 6719 void 6720 ice_handle_mdd_event(struct ice_softc *sc) 6721 { 6722 struct ice_hw *hw = &sc->hw; 6723 bool mdd_detected = false, request_reinit = false; 6724 device_t dev = sc->dev; 6725 u32 reg; 6726 6727 if (!ice_testandclear_state(&sc->state, ICE_STATE_MDD_PENDING)) 6728 return; 6729 6730 reg = rd32(hw, GL_MDET_TX_TCLAN); 6731 if (reg & GL_MDET_TX_TCLAN_VALID_M) { 6732 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S; 6733 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> GL_MDET_TX_TCLAN_VF_NUM_S; 6734 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> GL_MDET_TX_TCLAN_MAL_TYPE_S; 6735 u16 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S; 6736 6737 device_printf(dev, "Malicious Driver Detection Tx Descriptor check event '%s' on Tx queue %u PF# %u VF# %u\n", 6738 ice_mdd_tx_tclan_str(event), queue, pf_num, vf_num); 6739 6740 /* Only clear this event if it matches this PF, that way other 6741 * PFs can read the event and determine VF and queue number. 6742 */ 6743 if (pf_num == hw->pf_id) 6744 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); 6745 6746 mdd_detected = true; 6747 } 6748 6749 /* Determine what triggered the MDD event */ 6750 reg = rd32(hw, GL_MDET_TX_PQM); 6751 if (reg & GL_MDET_TX_PQM_VALID_M) { 6752 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> GL_MDET_TX_PQM_PF_NUM_S; 6753 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> GL_MDET_TX_PQM_VF_NUM_S; 6754 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> GL_MDET_TX_PQM_MAL_TYPE_S; 6755 u16 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> GL_MDET_TX_PQM_QNUM_S; 6756 6757 device_printf(dev, "Malicious Driver Detection Tx Quanta check event '%s' on Tx queue %u PF# %u VF# %u\n", 6758 ice_mdd_tx_pqm_str(event), queue, pf_num, vf_num); 6759 6760 /* Only clear this event if it matches this PF, that way other 6761 * PFs can read the event and determine VF and queue number. 6762 */ 6763 if (pf_num == hw->pf_id) 6764 wr32(hw, GL_MDET_TX_PQM, 0xffffffff); 6765 6766 mdd_detected = true; 6767 } 6768 6769 reg = rd32(hw, GL_MDET_RX); 6770 if (reg & GL_MDET_RX_VALID_M) { 6771 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> GL_MDET_RX_PF_NUM_S; 6772 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> GL_MDET_RX_VF_NUM_S; 6773 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> GL_MDET_RX_MAL_TYPE_S; 6774 u16 queue = (reg & GL_MDET_RX_QNUM_M) >> GL_MDET_RX_QNUM_S; 6775 6776 device_printf(dev, "Malicious Driver Detection Rx event '%s' on Rx queue %u PF# %u VF# %u\n", 6777 ice_mdd_rx_str(event), queue, pf_num, vf_num); 6778 6779 /* Only clear this event if it matches this PF, that way other 6780 * PFs can read the event and determine VF and queue number. 6781 */ 6782 if (pf_num == hw->pf_id) 6783 wr32(hw, GL_MDET_RX, 0xffffffff); 6784 6785 mdd_detected = true; 6786 } 6787 6788 /* Now, confirm that this event actually affects this PF, by checking 6789 * the PF registers. 
6790 */ 6791 if (mdd_detected) { 6792 reg = rd32(hw, PF_MDET_TX_TCLAN); 6793 if (reg & PF_MDET_TX_TCLAN_VALID_M) { 6794 wr32(hw, PF_MDET_TX_TCLAN, 0xffff); 6795 sc->soft_stats.tx_mdd_count++; 6796 request_reinit = true; 6797 } 6798 6799 reg = rd32(hw, PF_MDET_TX_PQM); 6800 if (reg & PF_MDET_TX_PQM_VALID_M) { 6801 wr32(hw, PF_MDET_TX_PQM, 0xffff); 6802 sc->soft_stats.tx_mdd_count++; 6803 request_reinit = true; 6804 } 6805 6806 reg = rd32(hw, PF_MDET_RX); 6807 if (reg & PF_MDET_RX_VALID_M) { 6808 wr32(hw, PF_MDET_RX, 0xffff); 6809 sc->soft_stats.rx_mdd_count++; 6810 request_reinit = true; 6811 } 6812 } 6813 6814 /* TODO: Implement logic to detect and handle events caused by VFs. */ 6815 6816 /* request that the upper stack re-initialize the Tx/Rx queues */ 6817 if (request_reinit) 6818 ice_request_stack_reinit(sc); 6819 6820 ice_flush(hw); 6821 } 6822 6823 /** 6824 * ice_init_dcb_setup - Initialize DCB settings for HW 6825 * @sc: the device softc 6826 * 6827 * This needs to be called after the fw_lldp_agent sysctl is added, since that 6828 * can update the device's LLDP agent status if a tunable value is set. 6829 * 6830 * Get and store the initial state of DCB settings on driver load. Print out 6831 * informational messages as well. 6832 */ 6833 void 6834 ice_init_dcb_setup(struct ice_softc *sc) 6835 { 6836 struct ice_hw *hw = &sc->hw; 6837 device_t dev = sc->dev; 6838 bool dcbx_agent_status; 6839 enum ice_status status; 6840 6841 /* Don't do anything if DCB isn't supported */ 6842 if (!hw->func_caps.common_cap.dcb) { 6843 device_printf(dev, "%s: No DCB support\n", 6844 __func__); 6845 return; 6846 } 6847 6848 hw->port_info->dcbx_status = ice_get_dcbx_status(hw); 6849 if (hw->port_info->dcbx_status != ICE_DCBX_STATUS_DONE && 6850 hw->port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) { 6851 /* 6852 * Start DCBX agent, but not LLDP. The return value isn't 6853 * checked here because a more detailed dcbx agent status is 6854 * retrieved and checked in ice_init_dcb() and below. 6855 */ 6856 ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL); 6857 } 6858 6859 /* This sets hw->port_info->is_sw_lldp */ 6860 status = ice_init_dcb(hw, true); 6861 6862 /* If there is an error, then FW LLDP is not in a usable state */ 6863 if (status != 0 && status != ICE_ERR_NOT_READY) { 6864 /* Don't print an error message if the return code from the AQ 6865 * cmd performed in ice_init_dcb() is EPERM; that means the 6866 * FW LLDP engine is disabled, and that is a valid state.
6867 */ 6868 if (!(status == ICE_ERR_AQ_ERROR && 6869 hw->adminq.sq_last_status == ICE_AQ_RC_EPERM)) { 6870 device_printf(dev, "DCB init failed, err %s aq_err %s\n", 6871 ice_status_str(status), 6872 ice_aq_str(hw->adminq.sq_last_status)); 6873 } 6874 hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; 6875 } 6876 6877 switch (hw->port_info->dcbx_status) { 6878 case ICE_DCBX_STATUS_DIS: 6879 ice_debug(hw, ICE_DBG_DCB, "DCBX disabled\n"); 6880 break; 6881 case ICE_DCBX_STATUS_NOT_STARTED: 6882 ice_debug(hw, ICE_DBG_DCB, "DCBX not started\n"); 6883 break; 6884 case ICE_DCBX_STATUS_MULTIPLE_PEERS: 6885 ice_debug(hw, ICE_DBG_DCB, "DCBX detected multiple peers\n"); 6886 break; 6887 default: 6888 break; 6889 } 6890 6891 /* LLDP disabled in FW */ 6892 if (hw->port_info->is_sw_lldp) { 6893 ice_add_rx_lldp_filter(sc); 6894 device_printf(dev, "Firmware LLDP agent disabled\n"); 6895 } else { 6896 ice_del_rx_lldp_filter(sc); 6897 } 6898 } 6899 6900 /** 6901 * ice_handle_mib_change_event - helper function to log LLDP MIB change events 6902 * @sc: device softc 6903 * @event: event received on a control queue 6904 * 6905 * Prints out the type of an LLDP MIB change event in a DCB debug message. 6906 * 6907 * XXX: Should be extended to do more if the driver decides to notify other SW 6908 * of LLDP MIB changes, or needs to extract info from the MIB. 6909 */ 6910 static void 6911 ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event) 6912 { 6913 struct ice_aqc_lldp_get_mib *params = 6914 (struct ice_aqc_lldp_get_mib *)&event->desc.params.lldp_get_mib; 6915 u8 mib_type, bridge_type, tx_status; 6916 6917 /* XXX: To get the contents of the MIB that caused the event, set the 6918 * ICE_DBG_AQ debug mask and read that output 6919 */ 6920 static const char* mib_type_strings[] = { 6921 "Local MIB", 6922 "Remote MIB", 6923 "Reserved", 6924 "Reserved" 6925 }; 6926 static const char* bridge_type_strings[] = { 6927 "Nearest Bridge", 6928 "Non-TPMR Bridge", 6929 "Reserved", 6930 "Reserved" 6931 }; 6932 static const char* tx_status_strings[] = { 6933 "Port's TX active", 6934 "Port's TX suspended and drained", 6935 "Reserved", 6936 "Port's TX suspended and drained; blocked TC pipe flushed" 6937 }; 6938 6939 mib_type = (params->type & ICE_AQ_LLDP_MIB_TYPE_M) >> 6940 ICE_AQ_LLDP_MIB_TYPE_S; 6941 bridge_type = (params->type & ICE_AQ_LLDP_BRID_TYPE_M) >> 6942 ICE_AQ_LLDP_BRID_TYPE_S; 6943 tx_status = (params->type & ICE_AQ_LLDP_TX_M) >> 6944 ICE_AQ_LLDP_TX_S; 6945 6946 ice_debug(&sc->hw, ICE_DBG_DCB, "LLDP MIB Change Event (%s, %s, %s)\n", 6947 mib_type_strings[mib_type], bridge_type_strings[bridge_type], 6948 tx_status_strings[tx_status]); 6949 } 6950 6951 /** 6952 * ice_send_version - Send driver version to firmware 6953 * @sc: the device private softc 6954 * 6955 * Send the driver version to the firmware. This must be called as early as 6956 * possible after ice_init_hw().
6957 */ 6958 int 6959 ice_send_version(struct ice_softc *sc) 6960 { 6961 struct ice_driver_ver driver_version = {0}; 6962 struct ice_hw *hw = &sc->hw; 6963 device_t dev = sc->dev; 6964 enum ice_status status; 6965 6966 driver_version.major_ver = ice_major_version; 6967 driver_version.minor_ver = ice_minor_version; 6968 driver_version.build_ver = ice_patch_version; 6969 driver_version.subbuild_ver = ice_rc_version; 6970 6971 strlcpy((char *)driver_version.driver_string, ice_driver_version, 6972 sizeof(driver_version.driver_string)); 6973 6974 status = ice_aq_send_driver_ver(hw, &driver_version, NULL); 6975 if (status) { 6976 device_printf(dev, "Unable to send driver version to firmware, err %s aq_err %s\n", 6977 ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); 6978 return (EIO); 6979 } 6980 6981 return (0); 6982 } 6983 6984 /** 6985 * ice_handle_lan_overflow_event - helper function to log LAN overflow events 6986 * @sc: device softc 6987 * @event: event received on a control queue 6988 * 6989 * Prints out a message when a LAN overflow event is detected on a receive 6990 * queue. 6991 */ 6992 static void 6993 ice_handle_lan_overflow_event(struct ice_softc *sc, struct ice_rq_event_info *event) 6994 { 6995 struct ice_aqc_event_lan_overflow *params = 6996 (struct ice_aqc_event_lan_overflow *)&event->desc.params.lan_overflow; 6997 struct ice_hw *hw = &sc->hw; 6998 6999 ice_debug(hw, ICE_DBG_DCB, "LAN overflow event detected, prtdcb_ruptq=0x%08x, qtx_ctl=0x%08x\n", 7000 LE32_TO_CPU(params->prtdcb_ruptq), 7001 LE32_TO_CPU(params->qtx_ctl)); 7002 } 7003 7004 /** 7005 * ice_add_ethertype_to_list - Add an Ethertype filter to a filter list 7006 * @vsi: the VSI to target packets to 7007 * @list: the list to add the filter to 7008 * @ethertype: the Ethertype to filter on 7009 * @direction: The direction of the filter (Tx or Rx) 7010 * @action: the action to take 7011 * 7012 * Add an Ethertype filter to a filter list. Used to forward a series of 7013 * filters to the firmware for configuring the switch. 7014 * 7015 * Returns 0 on success, and an error code on failure. 7016 */ 7017 static int 7018 ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list, 7019 u16 ethertype, u16 direction, 7020 enum ice_sw_fwd_act_type action) 7021 { 7022 struct ice_fltr_list_entry *entry; 7023 7024 MPASS((direction == ICE_FLTR_TX) || (direction == ICE_FLTR_RX)); 7025 7026 entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); 7027 if (!entry) 7028 return (ENOMEM); 7029 7030 entry->fltr_info.flag = direction; 7031 entry->fltr_info.src_id = ICE_SRC_ID_VSI; 7032 entry->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; 7033 entry->fltr_info.fltr_act = action; 7034 entry->fltr_info.vsi_handle = vsi->idx; 7035 entry->fltr_info.l_data.ethertype_mac.ethertype = ethertype; 7036 7037 LIST_ADD(&entry->list_entry, list); 7038 7039 return 0; 7040 } 7041 7042 #define ETHERTYPE_PAUSE_FRAMES 0x8808 7043 #define ETHERTYPE_LLDP_FRAMES 0x88cc 7044 7045 /** 7046 * ice_cfg_pf_ethertype_filters - Configure switch to drop ethertypes 7047 * @sc: the device private softc 7048 * 7049 * Configure the switch to drop PAUSE frames and LLDP frames transmitted from 7050 * the host. This prevents malicious VFs from sending these frames and being 7051 * able to control or configure the network. 
7052 */ 7053 int 7054 ice_cfg_pf_ethertype_filters(struct ice_softc *sc) 7055 { 7056 struct ice_list_head ethertype_list; 7057 struct ice_vsi *vsi = &sc->pf_vsi; 7058 struct ice_hw *hw = &sc->hw; 7059 device_t dev = sc->dev; 7060 enum ice_status status; 7061 int err = 0; 7062 7063 INIT_LIST_HEAD(&ethertype_list); 7064 7065 /* 7066 * Note that the switch filters will ignore the VSI index for the drop 7067 * action, so we only need to program drop filters once for the main 7068 * VSI. 7069 */ 7070 7071 /* Configure switch to drop all Tx pause frames coming from any VSI. */ 7072 if (sc->enable_tx_fc_filter) { 7073 err = ice_add_ethertype_to_list(vsi, &ethertype_list, 7074 ETHERTYPE_PAUSE_FRAMES, 7075 ICE_FLTR_TX, ICE_DROP_PACKET); 7076 if (err) 7077 goto free_ethertype_list; 7078 } 7079 7080 /* Configure switch to drop LLDP frames coming from any VSI */ 7081 if (sc->enable_tx_lldp_filter) { 7082 err = ice_add_ethertype_to_list(vsi, &ethertype_list, 7083 ETHERTYPE_LLDP_FRAMES, 7084 ICE_FLTR_TX, ICE_DROP_PACKET); 7085 if (err) 7086 goto free_ethertype_list; 7087 } 7088 7089 status = ice_add_eth_mac(hw, &ethertype_list); 7090 if (status) { 7091 device_printf(dev, 7092 "Failed to add Tx Ethertype filters, err %s aq_err %s\n", 7093 ice_status_str(status), 7094 ice_aq_str(hw->adminq.sq_last_status)); 7095 err = (EIO); 7096 } 7097 7098 free_ethertype_list: 7099 ice_free_fltr_list(&ethertype_list); 7100 return err; 7101 } 7102 7103 /** 7104 * ice_add_rx_lldp_filter - add ethertype filter for Rx LLDP frames 7105 * @sc: the device private structure 7106 * 7107 * Add a switch ethertype filter which forwards the LLDP frames to the main PF 7108 * VSI. Called when the fw_lldp_agent is disabled, to allow the LLDP frames to 7109 * be forwarded to the stack. 7110 */ 7111 static void 7112 ice_add_rx_lldp_filter(struct ice_softc *sc) 7113 { 7114 struct ice_list_head ethertype_list; 7115 struct ice_vsi *vsi = &sc->pf_vsi; 7116 struct ice_hw *hw = &sc->hw; 7117 device_t dev = sc->dev; 7118 enum ice_status status; 7119 int err; 7120 7121 INIT_LIST_HEAD(&ethertype_list); 7122 7123 /* Forward Rx LLDP frames to the stack */ 7124 err = ice_add_ethertype_to_list(vsi, &ethertype_list, 7125 ETHERTYPE_LLDP_FRAMES, 7126 ICE_FLTR_RX, ICE_FWD_TO_VSI); 7127 if (err) { 7128 device_printf(dev, 7129 "Failed to add Rx LLDP filter, err %s\n", 7130 ice_err_str(err)); 7131 goto free_ethertype_list; 7132 } 7133 7134 status = ice_add_eth_mac(hw, &ethertype_list); 7135 if (status == ICE_ERR_ALREADY_EXISTS) { 7136 ; /* Don't complain if we try to add a filter that already exists */ 7137 } else if (status) { 7138 device_printf(dev, 7139 "Failed to add Rx LLDP filter, err %s aq_err %s\n", 7140 ice_status_str(status), 7141 ice_aq_str(hw->adminq.sq_last_status)); 7142 } 7143 7144 free_ethertype_list: 7145 ice_free_fltr_list(&ethertype_list); 7146 } 7147 7148 /** 7149 * ice_del_rx_lldp_filter - Remove ethertype filter for Rx LLDP frames 7150 * @sc: the device private structure 7151 * 7152 * Remove the switch filter forwarding LLDP frames to the main PF VSI, called 7153 * when the firmware LLDP agent is enabled, to stop routing LLDP frames to the 7154 * stack. 
7155 */ 7156 static void 7157 ice_del_rx_lldp_filter(struct ice_softc *sc) 7158 { 7159 struct ice_list_head ethertype_list; 7160 struct ice_vsi *vsi = &sc->pf_vsi; 7161 struct ice_hw *hw = &sc->hw; 7162 device_t dev = sc->dev; 7163 enum ice_status status; 7164 int err; 7165 7166 INIT_LIST_HEAD(&ethertype_list); 7167 7168 /* Remove filter forwarding Rx LLDP frames to the stack */ 7169 err = ice_add_ethertype_to_list(vsi, &ethertype_list, 7170 ETHERTYPE_LLDP_FRAMES, 7171 ICE_FLTR_RX, ICE_FWD_TO_VSI); 7172 if (err) { 7173 device_printf(dev, 7174 "Failed to remove Rx LLDP filter, err %s\n", 7175 ice_err_str(err)); 7176 goto free_ethertype_list; 7177 } 7178 7179 status = ice_remove_eth_mac(hw, &ethertype_list); 7180 if (status == ICE_ERR_DOES_NOT_EXIST) { 7181 ; /* Don't complain if we try to remove a filter that doesn't exist */ 7182 } else if (status) { 7183 device_printf(dev, 7184 "Failed to remove Rx LLDP filter, err %s aq_err %s\n", 7185 ice_status_str(status), 7186 ice_aq_str(hw->adminq.sq_last_status)); 7187 } 7188 7189 free_ethertype_list: 7190 ice_free_fltr_list(&ethertype_list); 7191 } 7192 7193 /** 7194 * ice_init_link_configuration -- Setup link in different ways depending 7195 * on whether media is available or not. 7196 * @sc: device private structure 7197 * 7198 * Called at the end of the attach process to either set default link 7199 * parameters if there is media available, or force HW link down and 7200 * set a state bit if there is no media. 7201 */ 7202 void 7203 ice_init_link_configuration(struct ice_softc *sc) 7204 { 7205 struct ice_port_info *pi = sc->hw.port_info; 7206 struct ice_hw *hw = &sc->hw; 7207 device_t dev = sc->dev; 7208 enum ice_status status; 7209 7210 pi->phy.get_link_info = true; 7211 status = ice_get_link_status(pi, &sc->link_up); 7212 if (status != ICE_SUCCESS) { 7213 device_printf(dev, 7214 "%s: ice_get_link_status failed; status %s, aq_err %s\n", 7215 __func__, ice_status_str(status), 7216 ice_aq_str(hw->adminq.sq_last_status)); 7217 return; 7218 } 7219 7220 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { 7221 ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA); 7222 /* Apply default link settings */ 7223 ice_apply_saved_phy_cfg(sc); 7224 } else { 7225 /* Set link down, and poll for media available in timer. This prevents the 7226 * driver from receiving spurious link-related events. 7227 */ 7228 ice_set_state(&sc->state, ICE_STATE_NO_MEDIA); 7229 status = ice_aq_set_link_restart_an(pi, false, NULL); 7230 if (status != ICE_SUCCESS) 7231 device_printf(dev, 7232 "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", 7233 __func__, ice_status_str(status), 7234 ice_aq_str(hw->adminq.sq_last_status)); 7235 } 7236 } 7237 7238 /** 7239 * ice_apply_saved_phy_req_to_cfg -- Write saved user PHY settings to cfg data 7240 * @pi: port info struct 7241 * @pcaps: TOPO_CAPS capability data to use for defaults 7242 * @cfg: new PHY config data to be modified 7243 * 7244 * Applies user settings for advertised speeds to the PHY type fields in the 7245 * supplied PHY config struct. It uses the data from pcaps to check if the 7246 * saved settings are invalid and uses the pcaps data instead if they are 7247 * invalid. 
7248 */ 7249 static void 7250 ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi, 7251 struct ice_aqc_get_phy_caps_data *pcaps, 7252 struct ice_aqc_set_phy_cfg_data *cfg) 7253 { 7254 u64 phy_low = 0, phy_high = 0; 7255 7256 ice_update_phy_type(&phy_low, &phy_high, pi->phy.curr_user_speed_req); 7257 cfg->phy_type_low = pcaps->phy_type_low & htole64(phy_low); 7258 cfg->phy_type_high = pcaps->phy_type_high & htole64(phy_high); 7259 7260 /* Can't use saved user speed request; use NVM default PHY capabilities */ 7261 if (!cfg->phy_type_low && !cfg->phy_type_high) { 7262 cfg->phy_type_low = pcaps->phy_type_low; 7263 cfg->phy_type_high = pcaps->phy_type_high; 7264 } 7265 } 7266 7267 /** 7268 * ice_apply_saved_fec_req_to_cfg -- Write saved user FEC mode to cfg data 7269 * @pi: port info struct 7270 * @pcaps: TOPO_CAPS capability data to use for defaults 7271 * @cfg: new PHY config data to be modified 7272 * 7273 * Applies user setting for FEC mode to PHY config struct. It uses the data 7274 * from pcaps to check if the saved settings are invalid and uses the pcaps 7275 * data instead if they are invalid. 7276 */ 7277 static void 7278 ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi, 7279 struct ice_aqc_get_phy_caps_data *pcaps, 7280 struct ice_aqc_set_phy_cfg_data *cfg) 7281 { 7282 ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); 7283 7284 /* Can't use saved user FEC mode; use NVM default PHY capabilities */ 7285 if (cfg->link_fec_opt && 7286 !(cfg->link_fec_opt & pcaps->link_fec_options)) { 7287 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 7288 cfg->link_fec_opt = pcaps->link_fec_options; 7289 } 7290 } 7291 7292 /** 7293 * ice_apply_saved_fc_req_to_cfg -- Write saved user flow control mode to cfg data 7294 * @pi: port info struct 7295 * @cfg: new PHY config data to be modified 7296 * 7297 * Applies user setting for flow control mode to PHY config struct. There are 7298 * no invalid flow control mode settings; if there are, then this function 7299 * treats them like "ICE_FC_NONE". 7300 */ 7301 static void 7302 ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, 7303 struct ice_aqc_set_phy_cfg_data *cfg) 7304 { 7305 cfg->caps &= ~(ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | 7306 ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY); 7307 7308 switch (pi->phy.curr_user_fc_req) { 7309 case ICE_FC_FULL: 7310 cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | 7311 ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; 7312 break; 7313 case ICE_FC_RX_PAUSE: 7314 cfg->caps |= ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; 7315 break; 7316 case ICE_FC_TX_PAUSE: 7317 cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY; 7318 break; 7319 default: 7320 /* ICE_FC_NONE */ 7321 break; 7322 } 7323 } 7324 7325 /** 7326 * ice_apply_saved_user_req_to_cfg -- Apply all saved user settings to AQ cfg data 7327 * @pi: port info struct 7328 * @pcaps: TOPO_CAPS capability data to use for defaults 7329 * @cfg: new PHY config data to be modified 7330 * 7331 * Applies user settings for advertised speeds, FEC mode, and flow control 7332 * mode to the supplied PHY config struct; it uses the data from pcaps to check 7333 * if the saved settings are invalid and uses the pcaps data instead if they 7334 * are invalid. 
7335 */ 7336 static void 7337 ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi, 7338 struct ice_aqc_get_phy_caps_data *pcaps, 7339 struct ice_aqc_set_phy_cfg_data *cfg) 7340 { 7341 ice_apply_saved_phy_req_to_cfg(pi, pcaps, cfg); 7342 ice_apply_saved_fec_req_to_cfg(pi, pcaps, cfg); 7343 ice_apply_saved_fc_req_to_cfg(pi, cfg); 7344 } 7345 7346 /** 7347 * ice_apply_saved_phy_cfg -- Re-apply user PHY config settings 7348 * @sc: device private structure 7349 * 7350 * Takes the saved user PHY config settings, overwrites the NVM 7351 * default with them if they're valid, and uses the Set PHY Config AQ command 7352 * to apply them. 7353 * 7354 * Intended for use when media is inserted. 7355 * 7356 * @pre Port has media available 7357 */ 7358 void 7359 ice_apply_saved_phy_cfg(struct ice_softc *sc) 7360 { 7361 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 7362 struct ice_port_info *pi = sc->hw.port_info; 7363 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 7364 struct ice_hw *hw = &sc->hw; 7365 device_t dev = sc->dev; 7366 enum ice_status status; 7367 7368 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 7369 &pcaps, NULL); 7370 if (status != ICE_SUCCESS) { 7371 device_printf(dev, 7372 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 7373 __func__, ice_status_str(status), 7374 ice_aq_str(hw->adminq.sq_last_status)); 7375 return; 7376 } 7377 7378 /* Setup new PHY config */ 7379 ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); 7380 7381 /* Apply settings requested by user */ 7382 ice_apply_saved_user_req_to_cfg(pi, &pcaps, &cfg); 7383 7384 /* Enable link and re-negotiate it */ 7385 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; 7386 7387 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 7388 if (status != ICE_SUCCESS) { 7389 if ((status == ICE_ERR_AQ_ERROR) && 7390 (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)) 7391 device_printf(dev, 7392 "%s: User PHY cfg not applied; no media in port\n", 7393 __func__); 7394 else 7395 device_printf(dev, 7396 "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", 7397 __func__, ice_status_str(status), 7398 ice_aq_str(hw->adminq.sq_last_status)); 7399 } 7400 } 7401 7402 /** 7403 * ice_print_ldo_tlv - Print out LDO TLV information 7404 * @sc: device private structure 7405 * @tlv: LDO TLV information from the adapter NVM 7406 * 7407 * Dump out the information in tlv to the kernel message buffer; intended for 7408 * debugging purposes. 7409 */ 7410 static void 7411 ice_print_ldo_tlv(struct ice_softc *sc, struct ice_link_default_override_tlv *tlv) 7412 { 7413 device_t dev = sc->dev; 7414 7415 device_printf(dev, "TLV: -options 0x%02x\n", tlv->options); 7416 device_printf(dev, " -phy_config 0x%02x\n", tlv->phy_config); 7417 device_printf(dev, " -fec_options 0x%02x\n", tlv->fec_options); 7418 device_printf(dev, " -phy_high 0x%016llx\n", 7419 (unsigned long long)tlv->phy_type_high); 7420 device_printf(dev, " -phy_low 0x%016llx\n", 7421 (unsigned long long)tlv->phy_type_low); 7422 } 7423 7424 /** 7425 * ice_set_link_management_mode -- Strict or lenient link management 7426 * @sc: device private structure 7427 * 7428 * Some NVMs give the adapter the option to advertise a superset of link 7429 * configurations. This checks to see if that option is enabled. 7430 * Further, the NVM could also provide a specific set of configurations 7431 * to try; these are cached in the driver's private structure if they 7432 * are available. 
7433 */ 7434 void 7435 ice_set_link_management_mode(struct ice_softc *sc) 7436 { 7437 struct ice_port_info *pi = sc->hw.port_info; 7438 device_t dev = sc->dev; 7439 struct ice_link_default_override_tlv tlv = { 0 }; 7440 enum ice_status status; 7441 7442 /* Port must be in strict mode if FW version is below a certain 7443 * version. (i.e. Don't set lenient mode features) 7444 */ 7445 if (!(ice_fw_supports_link_override(&sc->hw))) 7446 return; 7447 7448 status = ice_get_link_default_override(&tlv, pi); 7449 if (status != ICE_SUCCESS) { 7450 device_printf(dev, 7451 "%s: ice_get_link_default_override failed; status %s, aq_err %s\n", 7452 __func__, ice_status_str(status), 7453 ice_aq_str(sc->hw.adminq.sq_last_status)); 7454 return; 7455 } 7456 7457 if (sc->hw.debug_mask & ICE_DBG_LINK) 7458 ice_print_ldo_tlv(sc, &tlv); 7459 7460 /* Set lenient link mode */ 7461 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LENIENT_LINK_MODE) && 7462 (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE))) 7463 ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_en); 7464 7465 /* Default overrides only work if in lenient link mode */ 7466 if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DEFAULT_OVERRIDE) && 7467 ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE) && 7468 (tlv.options & ICE_LINK_OVERRIDE_EN)) 7469 ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_en); 7470 7471 /* Cache the LDO TLV structure in the driver, since it won't change 7472 * during the driver's lifetime. 7473 */ 7474 sc->ldo_tlv = tlv; 7475 } 7476 7477 /** 7478 * ice_init_saved_phy_cfg -- Set cached user PHY cfg settings with NVM defaults 7479 * @sc: device private structure 7480 * 7481 * This should be called before the tunables for these link settings 7482 * (e.g. advertise_speed) are added -- so that these defaults don't overwrite 7483 * the cached values that the sysctl handlers will write. 7484 * 7485 * This also needs to be called before ice_init_link_configuration, to ensure 7486 * that there are sane values that can be written if there is media available 7487 * in the port. 7488 */ 7489 void 7490 ice_init_saved_phy_cfg(struct ice_softc *sc) 7491 { 7492 struct ice_port_info *pi = sc->hw.port_info; 7493 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 7494 struct ice_hw *hw = &sc->hw; 7495 device_t dev = sc->dev; 7496 enum ice_status status; 7497 u64 phy_low, phy_high; 7498 7499 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, 7500 &pcaps, NULL); 7501 if (status != ICE_SUCCESS) { 7502 device_printf(dev, 7503 "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", 7504 __func__, ice_status_str(status), 7505 ice_aq_str(hw->adminq.sq_last_status)); 7506 return; 7507 } 7508 7509 phy_low = le64toh(pcaps.phy_type_low); 7510 phy_high = le64toh(pcaps.phy_type_high); 7511 7512 /* Save off initial config parameters */ 7513 pi->phy.curr_user_speed_req = 7514 ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); 7515 pi->phy.curr_user_fec_req = ice_caps_to_fec_mode(pcaps.caps, 7516 pcaps.link_fec_options); 7517 pi->phy.curr_user_fc_req = ice_caps_to_fc_mode(pcaps.caps); 7518 } 7519 7520 /** 7521 * ice_module_init - Driver callback to handle module load 7522 * 7523 * Callback for handling module load events. This function should initialize 7524 * any data structures that are used for the life of the device driver. 
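 *
 * ice_module_init() and ice_module_exit() are dispatched from
 * ice_module_event_handler() further below, which the driver registers with
 * the kernel's module system elsewhere. A hedged, illustrative sketch of such
 * a registration (names hypothetical):
 *
 *	static moduledata_t ice_example_moddata = {
 *		"ice_example",			// module name
 *		ice_module_event_handler,	// event handler
 *		NULL				// extra data
 *	};
 *	DECLARE_MODULE(ice_example, ice_example_moddata, SI_SUB_DRIVERS,
 *	    SI_ORDER_ANY);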
7525 */ 7526 static int 7527 ice_module_init(void) 7528 { 7529 return (0); 7530 } 7531 7532 /** 7533 * ice_module_exit - Driver callback to handle module exit 7534 * 7535 * Callback for handling module unload events. This function should release 7536 * any resources initialized during ice_module_init. 7537 * 7538 * If this function returns non-zero, the module will not be unloaded. It 7539 * should only return such a value if the module cannot be unloaded at all, 7540 * such as due to outstanding memory references that cannot be revoked. 7541 */ 7542 static int 7543 ice_module_exit(void) 7544 { 7545 return (0); 7546 } 7547 7548 /** 7549 * ice_module_event_handler - Callback for module events 7550 * @mod: unused module_t parameter 7551 * @what: the event requested 7552 * @arg: unused event argument 7553 * 7554 * Callback used to handle module events from the stack. Used to allow the 7555 * driver to define custom behavior that should happen at module load and 7556 * unload. 7557 */ 7558 int 7559 ice_module_event_handler(module_t __unused mod, int what, void __unused *arg) 7560 { 7561 switch (what) { 7562 case MOD_LOAD: 7563 return ice_module_init(); 7564 case MOD_UNLOAD: 7565 return ice_module_exit(); 7566 default: 7567 /* TODO: do we need to handle MOD_QUIESCE and MOD_SHUTDOWN? */ 7568 return (EOPNOTSUPP); 7569 } 7570 } 7571 7572 /** 7573 * ice_handle_nvm_access_ioctl - Handle an NVM access ioctl request 7574 * @sc: the device private softc 7575 * @ifd: ifdrv ioctl request pointer 7576 */ 7577 int 7578 ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd) 7579 { 7580 union ice_nvm_access_data *data; 7581 struct ice_nvm_access_cmd *cmd; 7582 size_t ifd_len = ifd->ifd_len, malloc_len; 7583 struct ice_hw *hw = &sc->hw; 7584 device_t dev = sc->dev; 7585 enum ice_status status; 7586 u8 *nvm_buffer; 7587 int err; 7588 7589 /* 7590 * ifioctl forwards SIOCxDRVSPEC to iflib without performing 7591 * a privilege check. In turn, iflib forwards the ioctl to the driver 7592 * without performing a privilege check. Perform one here to ensure 7593 * that non-privileged threads cannot access this interface. 7594 */ 7595 err = priv_check(curthread, PRIV_DRIVER); 7596 if (err) 7597 return (err); 7598 7599 if (ifd_len < sizeof(struct ice_nvm_access_cmd)) { 7600 device_printf(dev, "%s: ifdrv length is too small. Got %zu, but expected %zu\n", 7601 __func__, ifd_len, sizeof(struct ice_nvm_access_cmd)); 7602 return (EINVAL); 7603 } 7604 7605 if (ifd->ifd_data == NULL) { 7606 device_printf(dev, "%s: ifd data buffer not present.\n", 7607 __func__); 7608 return (EINVAL); 7609 } 7610 7611 /* 7612 * If everything works correctly, ice_handle_nvm_access should not 7613 * modify data past the size of the ioctl length. However, it could 7614 * lead to memory corruption if it did. Make sure to allocate at least 7615 * enough space for the command and data regardless. This 7616 * ensures that any access to the data union will not access invalid 7617 * memory. 
7618 */ 7619 malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd)); 7620 7621 nvm_buffer = (u8 *)malloc(malloc_len, M_ICE, M_ZERO | M_WAITOK); 7622 if (!nvm_buffer) 7623 return (ENOMEM); 7624 7625 /* Copy the NVM access command and data in from user space */ 7626 /* coverity[tainted_data_argument] */ 7627 err = copyin(ifd->ifd_data, nvm_buffer, ifd_len); 7628 if (err) { 7629 device_printf(dev, "%s: Copying request from user space failed, err %s\n", 7630 __func__, ice_err_str(err)); 7631 goto cleanup_free_nvm_buffer; 7632 } 7633 7634 /* 7635 * The NVM command structure is immediately followed by data which 7636 * varies in size based on the command. 7637 */ 7638 cmd = (struct ice_nvm_access_cmd *)nvm_buffer; 7639 data = (union ice_nvm_access_data *)(nvm_buffer + sizeof(struct ice_nvm_access_cmd)); 7640 7641 /* Handle the NVM access request */ 7642 status = ice_handle_nvm_access(hw, cmd, data); 7643 if (status) 7644 ice_debug(hw, ICE_DBG_NVM, 7645 "NVM access request failed, err %s\n", 7646 ice_status_str(status)); 7647 7648 /* Copy the possibly modified contents of the handled request out */ 7649 err = copyout(nvm_buffer, ifd->ifd_data, ifd_len); 7650 if (err) { 7651 device_printf(dev, "%s: Copying response back to user space failed, err %s\n", 7652 __func__, ice_err_str(err)); 7653 goto cleanup_free_nvm_buffer; 7654 } 7655 7656 /* Convert private status to an error code for proper ioctl response */ 7657 switch (status) { 7658 case ICE_SUCCESS: 7659 err = (0); 7660 break; 7661 case ICE_ERR_NO_MEMORY: 7662 err = (ENOMEM); 7663 break; 7664 case ICE_ERR_OUT_OF_RANGE: 7665 err = (ENOTTY); 7666 break; 7667 case ICE_ERR_PARAM: 7668 default: 7669 err = (EINVAL); 7670 break; 7671 } 7672 7673 cleanup_free_nvm_buffer: 7674 free(nvm_buffer, M_ICE); 7675 return err; 7676 } 7677 7678 /** 7679 * ice_read_sff_eeprom - Read data from SFF eeprom 7680 * @sc: device softc 7681 * @dev_addr: I2C device address (typically 0xA0 or 0xA2) 7682 * @offset: offset into the eeprom 7683 * @data: pointer to data buffer to store read data in 7684 * @length: length to read; max length is 16 7685 * 7686 * Read from the SFF eeprom in the module for this PF's port. For more details 7687 * on the contents of an SFF eeprom, refer to SFF-8472 (SFP), SFF-8636 (QSFP), 7688 * and SFF-8024 (both). 
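 *
 * A minimal usage sketch (illustrative; it mirrors what the diagnostic
 * sysctl further below does to identify the module type):
 *
 *	u8 id;
 *	int err = ice_read_sff_eeprom(sc, 0xA0, 0, &id, 1);
 *	if (err == 0 && id == 0x3)
 *		; // SFF-8024 identifier 0x3 indicates SFP/SFP+/SFP28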
7689 */ 7690 int 7691 ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length) 7692 { 7693 struct ice_hw *hw = &sc->hw; 7694 int error = 0, retries = 0; 7695 enum ice_status status; 7696 u16 lport; 7697 7698 if (length > 16) 7699 return (EINVAL); 7700 7701 if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) 7702 return (ENOSYS); 7703 7704 if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) 7705 return (ENXIO); 7706 7707 /* Set bit to indicate lport value is valid */ 7708 lport = hw->port_info->lport | (0x1 << 8); 7709 7710 do { 7711 status = ice_aq_sff_eeprom(hw, lport, dev_addr, 7712 offset, 0, 0, data, length, 7713 false, NULL); 7714 if (!status) { 7715 error = 0; 7716 break; 7717 } 7718 if (status == ICE_ERR_AQ_ERROR && 7719 hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { 7720 error = EBUSY; 7721 continue; 7722 } 7723 if (status == ICE_ERR_AQ_ERROR && 7724 hw->adminq.sq_last_status == ICE_AQ_RC_EACCES) { 7725 /* FW says I2C access isn't supported */ 7726 error = EACCES; 7727 break; 7728 } 7729 if (status == ICE_ERR_AQ_ERROR && 7730 hw->adminq.sq_last_status == ICE_AQ_RC_EPERM) { 7731 device_printf(sc->dev, 7732 "%s: Module pointer location specified in command does not permit the required operation.\n", 7733 __func__); 7734 error = EPERM; 7735 break; 7736 } else { 7737 device_printf(sc->dev, 7738 "%s: Error reading I2C data: err %s aq_err %s\n", 7739 __func__, ice_status_str(status), 7740 ice_aq_str(hw->adminq.sq_last_status)); 7741 error = EIO; 7742 break; 7743 } 7744 } while (retries++ < ICE_I2C_MAX_RETRIES); 7745 7746 if (error == EBUSY) 7747 device_printf(sc->dev, 7748 "%s: Error reading I2C data after %d retries\n", 7749 __func__, ICE_I2C_MAX_RETRIES); 7750 7751 return (error); 7752 } 7753 7754 /** 7755 * ice_handle_i2c_req - Driver independent I2C request handler 7756 * @sc: device softc 7757 * @req: The I2C parameters to use 7758 * 7759 * Read from the port's I2C eeprom using the parameters from the ioctl. 7760 */ 7761 int 7762 ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req) 7763 { 7764 return ice_read_sff_eeprom(sc, req->dev_addr, req->offset, req->data, req->len); 7765 } 7766 7767 /** 7768 * ice_sysctl_read_i2c_diag_data - Read some module diagnostic data via i2c 7769 * @oidp: sysctl oid structure 7770 * @arg1: pointer to private data structure 7771 * @arg2: unused 7772 * @req: sysctl request pointer 7773 * 7774 * Read 8 bytes of diagnostic data from the SFF eeprom in the (Q)SFP module 7775 * inserted into the port. 
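 *
 * The values are emitted as raw hex bytes at the offsets in the table below.
 * A hedged decoding example (assuming the usual SFF-8472/SFF-8636 scaling,
 * where temperature is a signed 16-bit value, MSB first, in units of
 * 1/256 degrees Celsius):
 *
 *	int16_t raw = (int16_t)((buf[0] << 8) | buf[1]);
 *	int temp_mdegc = ((int)raw * 1000) / 256;  // millidegrees Celsius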
7776 * 7777 * | SFP A2 | QSFP Lower Page 7778 * ------------|---------|---------------- 7779 * Temperature | 96-97 | 22-23 7780 * Vcc | 98-99 | 26-27 7781 * TX power | 102-103 | 34-35..40-41 7782 * RX power | 104-105 | 50-51..56-57 7783 */ 7784 static int 7785 ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) 7786 { 7787 struct ice_softc *sc = (struct ice_softc *)arg1; 7788 device_t dev = sc->dev; 7789 struct sbuf *sbuf; 7790 int error = 0; 7791 u8 data[16]; 7792 7793 UNREFERENCED_PARAMETER(arg2); 7794 UNREFERENCED_PARAMETER(oidp); 7795 7796 if (ice_driver_is_detaching(sc)) 7797 return (ESHUTDOWN); 7798 7799 if (req->oldptr == NULL) { 7800 error = SYSCTL_OUT(req, 0, 128); 7801 return (error); 7802 } 7803 7804 error = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1); 7805 if (error) 7806 return (error); 7807 7808 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ 7809 if (data[0] == 0x3) { 7810 /* 7811 * Check for: 7812 * - Internally calibrated data 7813 * - Diagnostic monitoring is implemented 7814 */ 7815 ice_read_sff_eeprom(sc, 0xA0, 92, data, 1); 7816 if (!(data[0] & 0x60)) { 7817 device_printf(dev, "Module doesn't support diagnostics: 0xA0[92] = %02X\n", data[0]); 7818 return (ENODEV); 7819 } 7820 7821 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7822 7823 ice_read_sff_eeprom(sc, 0xA2, 96, data, 4); 7824 for (int i = 0; i < 4; i++) 7825 sbuf_printf(sbuf, "%02X ", data[i]); 7826 7827 ice_read_sff_eeprom(sc, 0xA2, 102, data, 4); 7828 for (int i = 0; i < 4; i++) 7829 sbuf_printf(sbuf, "%02X ", data[i]); 7830 } else if (data[0] == 0xD || data[0] == 0x11) { 7831 /* 7832 * QSFP+ modules are always internally calibrated, and must indicate 7833 * what types of diagnostic monitoring are implemented 7834 */ 7835 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7836 7837 ice_read_sff_eeprom(sc, 0xA0, 22, data, 2); 7838 for (int i = 0; i < 2; i++) 7839 sbuf_printf(sbuf, "%02X ", data[i]); 7840 7841 ice_read_sff_eeprom(sc, 0xA0, 26, data, 2); 7842 for (int i = 0; i < 2; i++) 7843 sbuf_printf(sbuf, "%02X ", data[i]); 7844 7845 ice_read_sff_eeprom(sc, 0xA0, 34, data, 2); 7846 for (int i = 0; i < 2; i++) 7847 sbuf_printf(sbuf, "%02X ", data[i]); 7848 7849 ice_read_sff_eeprom(sc, 0xA0, 50, data, 2); 7850 for (int i = 0; i < 2; i++) 7851 sbuf_printf(sbuf, "%02X ", data[i]); 7852 } else { 7853 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", data[0]); 7854 return (ENODEV); 7855 } 7856 7857 sbuf_finish(sbuf); 7858 sbuf_delete(sbuf); 7859 7860 return (0); 7861 } 7862 7863 /** 7864 * ice_alloc_intr_tracking - Setup interrupt tracking structures 7865 * @sc: device softc structure 7866 * 7867 * Sets up the resource manager for keeping track of interrupt allocations, 7868 * and initializes the tracking maps for the PF's interrupt allocations. 7869 * 7870 * Unlike the scheme for queues, this is done in one step since both the 7871 * manager and the maps both have the same lifetime. 7872 * 7873 * @returns 0 on success, or an error code on failure. 
7874 */ 7875 int 7876 ice_alloc_intr_tracking(struct ice_softc *sc) 7877 { 7878 struct ice_hw *hw = &sc->hw; 7879 device_t dev = sc->dev; 7880 int err; 7881 7882 /* Initialize the interrupt allocation manager */ 7883 err = ice_resmgr_init_contig_only(&sc->imgr, 7884 hw->func_caps.common_cap.num_msix_vectors); 7885 if (err) { 7886 device_printf(dev, "Unable to initialize PF interrupt manager: %s\n", 7887 ice_err_str(err)); 7888 return (err); 7889 } 7890 7891 /* Allocate PF interrupt mapping storage */ 7892 if (!(sc->pf_imap = 7893 (u16 *)malloc(sizeof(u16) * hw->func_caps.common_cap.num_msix_vectors, 7894 M_ICE, M_NOWAIT))) { 7895 device_printf(dev, "Unable to allocate PF imap memory\n"); 7896 err = ENOMEM; 7897 goto free_imgr; 7898 } 7899 for (u32 i = 0; i < hw->func_caps.common_cap.num_msix_vectors; i++) { 7900 sc->pf_imap[i] = ICE_INVALID_RES_IDX; 7901 } 7902 7903 return (0); 7904 7905 free_imgr: 7906 ice_resmgr_destroy(&sc->imgr); 7907 return (err); 7908 } 7909 7910 /** 7911 * ice_free_intr_tracking - Free PF interrupt tracking structures 7912 * @sc: device softc structure 7913 * 7914 * Frees the interrupt resource allocation manager and the PF's owned maps. 7915 * 7916 * VF maps are released when the owning VF's are destroyed, which should always 7917 * happen before this function is called. 7918 */ 7919 void 7920 ice_free_intr_tracking(struct ice_softc *sc) 7921 { 7922 if (sc->pf_imap) { 7923 ice_resmgr_release_map(&sc->imgr, sc->pf_imap, 7924 sc->lan_vectors); 7925 free(sc->pf_imap, M_ICE); 7926 sc->pf_imap = NULL; 7927 } 7928 7929 ice_resmgr_destroy(&sc->imgr); 7930 } 7931 7932 /** 7933 * ice_apply_supported_speed_filter - Mask off unsupported speeds 7934 * @phy_type_low: bit-field for the low quad word of PHY types 7935 * @phy_type_high: bit-field for the high quad word of PHY types 7936 * 7937 * Given the two quad words containing the supported PHY types, 7938 * this function will mask off the speeds that are not currently 7939 * supported by the device. 7940 */ 7941 static void 7942 ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high) 7943 { 7944 u64 phylow_mask; 7945 7946 /* We won't offer anything lower than 1G for any part, 7947 * but we also won't offer anything under 25G for 100G 7948 * parts. 7949 */ 7950 phylow_mask = ~(ICE_PHY_TYPE_LOW_1000BASE_T - 1); 7951 if (*phy_type_high || 7952 *phy_type_low & ~(ICE_PHY_TYPE_LOW_100GBASE_CR4 - 1)) 7953 phylow_mask = ~(ICE_PHY_TYPE_LOW_25GBASE_T - 1); 7954 *phy_type_low &= phylow_mask; 7955 } 7956 7957 /** 7958 * ice_get_phy_types - Report appropriate PHY types 7959 * @sc: device softc structure 7960 * @phy_type_low: bit-field for the low quad word of PHY types 7961 * @phy_type_high: bit-field for the high quad word of PHY types 7962 * 7963 * Populate the two quad words with bits representing the PHY types 7964 * supported by the device. This is really just a wrapper around 7965 * the ice_aq_get_phy_caps() that chooses the appropriate report 7966 * mode (lenient or strict) and reports back only the relevant PHY 7967 * types. In lenient mode the capabilities are retrieved with the 7968 * NVM_CAP report mode, otherwise they're retrieved using the 7969 * TOPO_CAP report mode (NVM intersected with current media). 7970 * 7971 * @returns 0 on success, or an error code on failure. 
7972 */ 7973 static enum ice_status 7974 ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high) 7975 { 7976 struct ice_aqc_get_phy_caps_data pcaps = { 0 }; 7977 struct ice_port_info *pi = sc->hw.port_info; 7978 device_t dev = sc->dev; 7979 enum ice_status status; 7980 u8 report_mode; 7981 7982 if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) 7983 report_mode = ICE_AQC_REPORT_NVM_CAP; 7984 else 7985 report_mode = ICE_AQC_REPORT_TOPO_CAP; 7986 status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL); 7987 if (status != ICE_SUCCESS) { 7988 device_printf(dev, 7989 "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", 7990 __func__, (report_mode) ? "TOPO_CAP" : "NVM_CAP", 7991 ice_status_str(status), 7992 ice_aq_str(sc->hw.adminq.sq_last_status)); 7993 return (status); 7994 } 7995 7996 *phy_type_low = le64toh(pcaps.phy_type_low); 7997 *phy_type_high = le64toh(pcaps.phy_type_high); 7998 7999 return (ICE_SUCCESS); 8000 } 8001
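/*
 * A hedged usage sketch for the two helpers above (hypothetical caller in
 * this file; error handling elided): ice_get_phy_types() reports which PHY
 * types the NVM/media allow, and ice_apply_supported_speed_filter() then
 * masks off the speeds that should not be advertised.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	if (ice_get_phy_types(sc, &phy_low, &phy_high) == ICE_SUCCESS) {
 *		ice_apply_supported_speed_filter(&phy_low, &phy_high);
 *		// phy_low/phy_high now hold only the PHY types to advertise
 *	}
 */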