/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

/**
 * @file ice_lib.h
 * @brief header for generic device and sysctl functions
 *
 * Contains definitions and function declarations for the ice_lib.c file. It
 * does not depend on the iflib networking stack.
 */

#ifndef _ICE_LIB_H_
#define _ICE_LIB_H_

#include <sys/types.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/module.h>
#include <sys/proc.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>

#include <sys/bitstring.h>

#include "ice_dcb.h"
#include "ice_type.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_resmgr.h"

#include "ice_rss.h"

/* Hide debug sysctls unless INVARIANTS is enabled */
#ifdef INVARIANTS
#define ICE_CTLFLAG_DEBUG 0
#else
#define ICE_CTLFLAG_DEBUG CTLFLAG_SKIP
#endif

/**
 * for_each_set_bit - For loop over each set bit in a bit string
 * @bit: storage for the bit index
 * @data: address of data block to loop over
 * @nbits: maximum number of bits to loop over
 *
 * macro to create a for loop over a bit string, which runs the body once for
 * each bit that is set in the string. The bit variable will be set to the
 * index of each set bit in the string, with zero representing the first bit.
 */
#define for_each_set_bit(bit, data, nbits) \
        for (bit_ffs((bitstr_t *)(data), (nbits), &(bit)); \
             (bit) != -1; \
             bit_ffs_at((bitstr_t *)(data), (bit) + 1, (nbits), &(bit)))
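
/*
 * Illustrative sketch only: a caller might walk the interrupt vectors
 * assigned to a VSI like this, where handle_vector() is a hypothetical
 * helper and not part of the driver:
 *
 *        int bit;
 *
 *        for_each_set_bit(bit, vsi->vmap, vsi->num_vectors)
 *                handle_vector(bit);
 */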
/**
 * @var broadcastaddr
 * @brief broadcast MAC address
 *
 * constant defining the broadcast MAC address, used for programming the
 * broadcast address as a MAC filter for the PF VSI.
 */
static const u8 broadcastaddr[ETHER_ADDR_LEN] = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

MALLOC_DECLARE(M_ICE);

extern const char ice_driver_version[];
extern const uint8_t ice_major_version;
extern const uint8_t ice_minor_version;
extern const uint8_t ice_patch_version;
extern const uint8_t ice_rc_version;

/* global sysctl indicating whether the Tx FC filter should be enabled */
extern bool ice_enable_tx_fc_filter;

/* global sysctl indicating whether the Tx LLDP filter should be enabled */
extern bool ice_enable_tx_lldp_filter;

/* global sysctl indicating whether FW health status events should be enabled */
extern bool ice_enable_health_events;

/**
 * @struct ice_bar_info
 * @brief PCI BAR mapping information
 *
 * Contains data about a PCI BAR that the driver has mapped for use.
 */
struct ice_bar_info {
        struct resource *res;
        bus_space_tag_t tag;
        bus_space_handle_t handle;
        bus_size_t size;
        int rid;
};

/* Alignment for queues */
#define DBA_ALIGN 128

/* Maximum TSO size is (256K)-1 */
#define ICE_TSO_SIZE ((256*1024) - 1)

/* Minimum size for TSO MSS */
#define ICE_MIN_TSO_MSS 64

#define ICE_MAX_TX_SEGS 8
#define ICE_MAX_TSO_SEGS 128

#define ICE_MAX_DMA_SEG_SIZE ((16*1024) - 1)

#define ICE_MAX_RX_SEGS 5

#define ICE_MAX_TSO_HDR_SEGS 3

#define ICE_MSIX_BAR 3

#define ICE_DEFAULT_DESC_COUNT 1024
#define ICE_MAX_DESC_COUNT 8160
#define ICE_MIN_DESC_COUNT 64
#define ICE_DESC_COUNT_INCR 32

/* List of hardware offloads we support */
#define ICE_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP_SCTP | \
                          CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP | \
                          CSUM_IP_TSO | CSUM_IP6_TSO)

/* Macros to decide what kind of hardware offload to enable */
#define ICE_CSUM_TCP (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
#define ICE_CSUM_UDP (CSUM_IP_UDP|CSUM_IP6_UDP)
#define ICE_CSUM_SCTP (CSUM_IP_SCTP|CSUM_IP6_SCTP)
#define ICE_CSUM_IP (CSUM_IP|CSUM_IP_TSO)

/* List of known RX CSUM offload flags */
#define ICE_RX_CSUM_FLAGS (CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | \
                           CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \
                           CSUM_COALESCED)

/* List of interface capabilities supported by ice hardware */
#define ICE_FULL_CAPS \
        (IFCAP_TSO4 | IFCAP_TSO6 | \
         IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \
         IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \
         IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \
         IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \
         IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO)

/* Safe mode disables support for hardware checksums and TSO */
#define ICE_SAFE_CAPS \
        (ICE_FULL_CAPS & ~(IFCAP_HWCSUM | IFCAP_TSO | \
                           IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM))

#define ICE_CAPS(sc) \
        (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE) ? ICE_SAFE_CAPS : ICE_FULL_CAPS)
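
/*
 * Illustrative sketch only: during interface setup the capability mask can
 * be derived from the softc, for example
 *
 *        if_setcapabilitiesbit(ifp, ICE_CAPS(sc), 0);
 *
 * so that safe mode automatically drops the checksum and TSO capabilities.
 */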
/**
 * ICE_NVM_ACCESS
 * @brief Private ioctl command number for NVM access ioctls
 *
 * The ioctl command number used by NVM update for accessing the driver for
 * NVM access commands.
 */
#define ICE_NVM_ACCESS \
        (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)

#define ICE_AQ_LEN 512
#define ICE_MBXQ_LEN 512
#define ICE_SBQ_LEN 512

#define ICE_CTRLQ_WORK_LIMIT 256

#define ICE_DFLT_TRAFFIC_CLASS BIT(0)

/* wait up to 50 microseconds for queue state change */
#define ICE_Q_WAIT_RETRY_LIMIT 5

#define ICE_UP_TABLE_TRANSLATE(val, i) \
        (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
         ICE_AQ_VSI_UP_TABLE_UP##i##_M)

/*
 * For now, set this to the hardware maximum. Each function gets a smaller
 * number assigned to it in hw->func_caps.guar_num_vsi, though there
 * appears to be no guarantee that is the maximum number that a function
 * can use.
 */
#define ICE_MAX_VSI_AVAILABLE 768

/* Maximum size of a single frame (for Tx and Rx) */
#define ICE_MAX_FRAME_SIZE ICE_AQ_SET_MAC_FRAME_SIZE_MAX

/* Maximum MTU size */
#define ICE_MAX_MTU (ICE_MAX_FRAME_SIZE - \
                     ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)

/*
 * Hardware requires that TSO packets have a segment size of at least 64
 * bytes. To avoid sending bad frames to the hardware, the driver forces the
 * MSS for all TSO packets to be at least 64 bytes.
 *
 * However, if the MTU is reduced below a certain size, then the resulting
 * larger MSS can result in transmitting segmented frames with a packet size
 * larger than the MTU.
 *
 * Avoid this by preventing the MTU from being lowered below this limit.
 * Alternative solutions require changing the TCP stack to disable offloading
 * the segmentation when the requested segment size goes below 64 bytes.
 */
#define ICE_MIN_MTU 112

#define ICE_DEFAULT_VF_QUEUES 4

/*
 * There are three settings that can be updated independently or
 * altogether: Link speed, FEC, and Flow Control. These macros allow
 * the caller to specify which setting(s) to update.
 */
#define ICE_APPLY_LS BIT(0)
#define ICE_APPLY_FEC BIT(1)
#define ICE_APPLY_FC BIT(2)
#define ICE_APPLY_LS_FEC (ICE_APPLY_LS | ICE_APPLY_FEC)
#define ICE_APPLY_LS_FC (ICE_APPLY_LS | ICE_APPLY_FC)
#define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC)
#define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC)
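
/*
 * Illustrative sketch only: a caller that wants to re-apply just the FEC
 * and flow control configuration, leaving link speed untouched, could use
 * the flags like this:
 *
 *        error = ice_apply_saved_phy_cfg(sc, ICE_APPLY_FEC_FC);
 *
 * ice_apply_saved_phy_cfg() is declared later in this header.
 */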
/**
 * @enum ice_dyn_idx_t
 * @brief Dynamic Control ITR indexes
 *
 * This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
        ICE_IDX_ITR0 = 0,
        ICE_IDX_ITR1 = 1,
        ICE_IDX_ITR2 = 2,
        ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};

/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1

#define ICE_ITR_MAX 8160

/* Define the default Tx and Rx ITR as 50us (translates to ~20k int/sec max) */
#define ICE_DFLT_TX_ITR 50
#define ICE_DFLT_RX_ITR 50

/**
 * ice_itr_to_reg - Convert an ITR setting into its register equivalent
 * @hw: The device HW structure
 * @itr_setting: the ITR setting to convert
 *
 * Based on the hardware ITR granularity, convert an ITR setting into the
 * correct value to prepare programming to the HW.
 */
static inline u16 ice_itr_to_reg(struct ice_hw *hw, u16 itr_setting)
{
        return itr_setting / hw->itr_gran;
}
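
/*
 * Illustrative example (assuming a 2 microsecond ITR granularity reported
 * in hw->itr_gran): the default 50us interval would be programmed as
 * ice_itr_to_reg(hw, ICE_DFLT_RX_ITR) == 50 / 2 == 25. The granularity is
 * read from hardware, so the actual value may differ.
 */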
/**
 * @enum ice_rx_dtype
 * @brief DTYPE header split options
 *
 * This enum matches the Rx context bits to define whether header split is
 * enabled or not.
 */
enum ice_rx_dtype {
        ICE_RX_DTYPE_NO_SPLIT = 0,
        ICE_RX_DTYPE_HEADER_SPLIT = 1,
        ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};

/* Strings used for displaying FEC mode
 *
 * Use ice_fec_str() to get these unless these need to be embedded in a
 * string constant.
 */
#define ICE_FEC_STRING_AUTO "Auto"
#define ICE_FEC_STRING_RS "RS-FEC"
#define ICE_FEC_STRING_BASER "FC-FEC/BASE-R"
#define ICE_FEC_STRING_NONE "None"

/* Strings used for displaying Flow Control mode
 *
 * Use ice_fc_str() to get these unless these need to be embedded in a
 * string constant.
 */
#define ICE_FC_STRING_FULL "Full"
#define ICE_FC_STRING_TX "Tx"
#define ICE_FC_STRING_RX "Rx"
#define ICE_FC_STRING_NONE "None"

/*
 * The number of times the ice_handle_i2c_req function will retry reading
 * I2C data via the Admin Queue before returning EBUSY.
 */
#define ICE_I2C_MAX_RETRIES 10

/*
 * The Start LLDP Agent AQ command will fail if it's sent too soon after
 * the LLDP agent is stopped. The period between the stop and start
 * commands must currently be at least 2 seconds.
 */
#define ICE_START_LLDP_RETRY_WAIT (2 * hz)

/*
 * The ice_(set|clear)_vsi_promisc() function expects a mask of promiscuous
 * modes to operate on. This mask is the default one for the driver, where
 * promiscuous is enabled/disabled for all types of non-VLAN-tagged/VLAN 0
 * traffic.
 */
#define ICE_VSI_PROMISC_MASK (ICE_PROMISC_UCAST_TX | \
                              ICE_PROMISC_UCAST_RX | \
                              ICE_PROMISC_MCAST_TX | \
                              ICE_PROMISC_MCAST_RX)

struct ice_softc;

/**
 * @enum ice_rx_cso_stat
 * @brief software checksum offload statistics
 *
 * Enumeration of possible checksum offload statistics captured by software
 * during the Rx path.
 */
enum ice_rx_cso_stat {
        ICE_CSO_STAT_RX_IP4_ERR,
        ICE_CSO_STAT_RX_IP6_ERR,
        ICE_CSO_STAT_RX_L3_ERR,
        ICE_CSO_STAT_RX_TCP_ERR,
        ICE_CSO_STAT_RX_UDP_ERR,
        ICE_CSO_STAT_RX_SCTP_ERR,
        ICE_CSO_STAT_RX_L4_ERR,
        ICE_CSO_STAT_RX_COUNT
};

/**
 * @enum ice_tx_cso_stat
 * @brief software checksum offload statistics
 *
 * Enumeration of possible checksum offload statistics captured by software
 * during the Tx path.
 */
enum ice_tx_cso_stat {
        ICE_CSO_STAT_TX_TCP,
        ICE_CSO_STAT_TX_UDP,
        ICE_CSO_STAT_TX_SCTP,
        ICE_CSO_STAT_TX_IP4,
        ICE_CSO_STAT_TX_IP6,
        ICE_CSO_STAT_TX_L3_ERR,
        ICE_CSO_STAT_TX_L4_ERR,
        ICE_CSO_STAT_TX_COUNT
};

/**
 * @struct tx_stats
 * @brief software Tx statistics
 *
 * Contains software counted Tx statistics for a single queue
 */
struct tx_stats {
        /* Soft Stats */
        u64 tx_bytes;
        u64 tx_packets;
        u64 mss_too_small;
        u64 cso[ICE_CSO_STAT_TX_COUNT];
};

/**
 * @struct rx_stats
 * @brief software Rx statistics
 *
 * Contains software counted Rx statistics for a single queue
 */
struct rx_stats {
        /* Soft Stats */
        u64 rx_packets;
        u64 rx_bytes;
        u64 desc_errs;
        u64 cso[ICE_CSO_STAT_RX_COUNT];
};
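
/*
 * Illustrative sketch only: assuming the per-queue structures embed these
 * soft stats (e.g. a "stats" member in the Tx queue structure), the Tx
 * path could account a checksummed TCP packet with something like
 *
 *        txq->stats.cso[ICE_CSO_STAT_TX_TCP]++;
 *
 * The exact queue layout lives in the iflib-specific headers.
 */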
/**
 * @struct ice_vsi_hw_stats
 * @brief hardware statistics for a VSI
 *
 * Stores statistics that are generated by hardware for a VSI.
 */
struct ice_vsi_hw_stats {
        struct ice_eth_stats prev;
        struct ice_eth_stats cur;
        bool offsets_loaded;
};

/**
 * @struct ice_pf_hw_stats
 * @brief hardware statistics for a PF
 *
 * Stores statistics that are generated by hardware for each PF.
 */
struct ice_pf_hw_stats {
        struct ice_hw_port_stats prev;
        struct ice_hw_port_stats cur;
        bool offsets_loaded;
};

/**
 * @struct ice_pf_sw_stats
 * @brief software statistics for a PF
 *
 * Contains software generated statistics relevant to a PF.
 */
struct ice_pf_sw_stats {
        /* # of reset events handled, by type */
        u32 corer_count;
        u32 globr_count;
        u32 empr_count;
        u32 pfr_count;

        /* # of detected MDD events for Tx and Rx */
        u32 tx_mdd_count;
        u32 rx_mdd_count;
};

/**
 * @struct ice_vsi
 * @brief VSI structure
 *
 * Contains data relevant to a single VSI
 */
struct ice_vsi {
        /* back pointer to the softc */
        struct ice_softc *sc;

        bool dynamic; /* if true, dynamically allocated */

        enum ice_vsi_type type; /* type of this VSI */
        u16 idx; /* software index to sc->all_vsi[] */

        u16 *tx_qmap; /* Tx VSI to PF queue mapping */
        u16 *rx_qmap; /* Rx VSI to PF queue mapping */

        bitstr_t *vmap; /* Vector(s) assigned to VSI */

        enum ice_resmgr_alloc_type qmap_type;

        struct ice_tx_queue *tx_queues; /* Tx queue array */
        struct ice_rx_queue *rx_queues; /* Rx queue array */

        int num_tx_queues;
        int num_rx_queues;
        int num_vectors;

        int16_t rx_itr;
        int16_t tx_itr;

        /* RSS configuration */
        u16 rss_table_size; /* HW RSS table size */
        u8 rss_lut_type; /* Used to configure Get/Set RSS LUT AQ call */

        int max_frame_size;
        u16 mbuf_sz;

        struct ice_aqc_vsi_props info;

        /* context for per-VSI sysctls */
        struct sysctl_ctx_list ctx;
        struct sysctl_oid *vsi_node;

        /* context for per-txq sysctls */
        struct sysctl_ctx_list txqs_ctx;
        struct sysctl_oid *txqs_node;

        /* context for per-rxq sysctls */
        struct sysctl_ctx_list rxqs_ctx;
        struct sysctl_oid *rxqs_node;

        /* VSI-level stats */
        struct ice_vsi_hw_stats hw_stats;
};

/**
 * @enum ice_state
 * @brief Driver state flags
 *
 * Used to indicate the status of various driver events. Intended to be
 * modified only using atomic operations, so that we can use it even in places
 * which aren't locked.
 */
enum ice_state {
        ICE_STATE_CONTROLQ_EVENT_PENDING,
        ICE_STATE_VFLR_PENDING,
        ICE_STATE_MDD_PENDING,
        ICE_STATE_RESET_OICR_RECV,
        ICE_STATE_RESET_PFR_REQ,
        ICE_STATE_PREPARED_FOR_RESET,
        ICE_STATE_RESET_FAILED,
        ICE_STATE_DRIVER_INITIALIZED,
        ICE_STATE_NO_MEDIA,
        ICE_STATE_RECOVERY_MODE,
        ICE_STATE_ROLLBACK_MODE,
        ICE_STATE_LINK_STATUS_REPORTED,
        ICE_STATE_DETACHING,
        ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
        ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
        /* This entry must be last */
        ICE_STATE_LAST,
};

/* Functions for setting and checking driver state. Note the functions take
 * bit positions, not bitmasks. The atomic_testandset_32 and
 * atomic_testandclear_32 operations require bit positions, while the
 * atomic_set_32 and atomic_clear_32 operations require bitmasks. This can
 * easily lead to programming errors, so we provide wrapper functions to
 * avoid them.
 */

/**
 * ice_set_state - Set the specified state
 * @s: the state bitmap
 * @bit: the state to set
 *
 * Atomically update the state bitmap with the specified bit set.
 */
static inline void
ice_set_state(volatile u32 *s, enum ice_state bit)
{
        /* atomic_set_32 expects a bitmask */
        atomic_set_32(s, BIT(bit));
}

/**
 * ice_clear_state - Clear the specified state
 * @s: the state bitmap
 * @bit: the state to clear
 *
 * Atomically update the state bitmap with the specified bit cleared.
 */
static inline void
ice_clear_state(volatile u32 *s, enum ice_state bit)
{
        /* atomic_clear_32 expects a bitmask */
        atomic_clear_32(s, BIT(bit));
}

/**
 * ice_testandset_state - Test and set the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Atomically update the state bitmap, setting the specified bit. Returns the
 * previous value of the bit.
 */
static inline u32
ice_testandset_state(volatile u32 *s, enum ice_state bit)
{
        /* atomic_testandset_32 expects a bit position */
        return atomic_testandset_32(s, bit);
}

/**
 * ice_testandclear_state - Test and clear the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Atomically update the state bitmap, clearing the specified bit. Returns the
 * previous value of the bit.
 */
static inline u32
ice_testandclear_state(volatile u32 *s, enum ice_state bit)
{
        /* atomic_testandclear_32 expects a bit position */
        return atomic_testandclear_32(s, bit);
}

/**
 * ice_test_state - Test the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Return true if the state is set, false otherwise. Use this only if the flow
 * does not need to update the state. If you must update the state as well,
 * prefer ice_testandset_state or ice_testandclear_state.
 */
static inline u32
ice_test_state(volatile u32 *s, enum ice_state bit)
{
        return (*s & BIT(bit)) ? true : false;
}
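
/*
 * Illustrative sketch only: a deferred task can consume a pending event
 * flag atomically, without taking a lock, e.g.
 *
 *        if (ice_testandclear_state(&sc->state, ICE_STATE_MDD_PENDING))
 *                ice_handle_mdd_event(sc);
 *
 * This assumes the softc keeps its state bitmap in a volatile u32 "state"
 * member; ice_handle_mdd_event() is declared later in this header.
 */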
/**
 * @struct ice_str_buf
 * @brief static length buffer for string returning
 *
 * Structure containing a fixed size string buffer, used to implement
 * numeric->string conversion functions that may want to return non-constant
 * strings.
 *
 * This allows returning a fixed size string that is generated by a conversion
 * function, and then copied to the used location without needing to use an
 * explicit local variable passed by reference.
 */
struct ice_str_buf {
        char str[ICE_STR_BUF_LEN];
};

struct ice_str_buf _ice_aq_str(enum ice_aq_err aq_err);
struct ice_str_buf _ice_status_str(enum ice_status status);
struct ice_str_buf _ice_err_str(int err);
struct ice_str_buf _ice_fltr_flag_str(u16 flag);
struct ice_str_buf _ice_mdd_tx_tclan_str(u8 event);
struct ice_str_buf _ice_mdd_tx_pqm_str(u8 event);
struct ice_str_buf _ice_mdd_rx_str(u8 event);
struct ice_str_buf _ice_fw_lldp_status(u32 lldp_status);

#define ice_aq_str(err) _ice_aq_str(err).str
#define ice_status_str(err) _ice_status_str(err).str
#define ice_err_str(err) _ice_err_str(err).str
#define ice_fltr_flag_str(flag) _ice_fltr_flag_str(flag).str

#define ice_mdd_tx_tclan_str(event) _ice_mdd_tx_tclan_str(event).str
#define ice_mdd_tx_pqm_str(event) _ice_mdd_tx_pqm_str(event).str
#define ice_mdd_rx_str(event) _ice_mdd_rx_str(event).str

#define ice_fw_lldp_status(lldp_status) _ice_fw_lldp_status(lldp_status).str
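
/*
 * Illustrative sketch only: because the conversion functions return the
 * buffer by value, the wrapper macros can be used directly as printf-style
 * arguments without declaring a temporary buffer, e.g.
 *
 *        device_printf(dev, "AQ command failed, err %s aq_err %s\n",
 *                      ice_status_str(status),
 *                      ice_aq_str(hw->adminq.sq_last_status));
 */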
/**
 * ice_enable_intr - Enable interrupts for given vector
 * @hw: the device private HW structure
 * @vector: the interrupt index in PF space
 *
 * In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
 */
static inline void
ice_enable_intr(struct ice_hw *hw, int vector)
{
        u32 dyn_ctl;

        /* Use ITR_NONE so that ITR configuration is not changed. */
        dyn_ctl = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
                  (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
        wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
}

/**
 * ice_disable_intr - Disable interrupts for given vector
 * @hw: the device private HW structure
 * @vector: the interrupt index in PF space
 *
 * In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
 */
static inline void
ice_disable_intr(struct ice_hw *hw, int vector)
{
        u32 dyn_ctl;

        /* Use ITR_NONE so that ITR configuration is not changed. */
        dyn_ctl = ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S;
        wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
}

/**
 * ice_is_tx_desc_done - determine if a Tx descriptor is done
 * @txd: the Tx descriptor to check
 *
 * Returns true if hardware is done with a Tx descriptor and software is
 * capable of re-using it.
 */
static inline bool
ice_is_tx_desc_done(struct ice_tx_desc *txd)
{
        return (((txd->cmd_type_offset_bsz & ICE_TXD_QW1_DTYPE_M)
                 >> ICE_TXD_QW1_DTYPE_S) == ICE_TX_DESC_DTYPE_DESC_DONE);
}

/**
 * ice_get_pf_id - Get the PF id from the hardware registers
 * @hw: the ice hardware structure
 *
 * Reads the PF_FUNC_RID register and extracts the function number from it.
 * Intended to be used in cases where hw->pf_id hasn't yet been assigned by
 * ice_init_hw.
 *
 * @pre this function should be called only after PCI register access has been
 * setup, and prior to ice_init_hw. After hardware has been initialized, the
 * cached hw->pf_id value can be used.
 */
static inline u8
ice_get_pf_id(struct ice_hw *hw)
{
        return (u8)((rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNCTION_NUMBER_M) >>
                    PF_FUNC_RID_FUNCTION_NUMBER_S);
}
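
/*
 * Illustrative sketch only: a Tx cleanup routine can poll ring entries with
 * ice_is_tx_desc_done() before recycling them. The "tx_base", "desc_count"
 * and "next_to_clean" names below are assumptions standing in for the real
 * queue layout, which lives in the iflib-specific headers:
 *
 *        while (ice_is_tx_desc_done(&txq->tx_base[txq->next_to_clean])) {
 *                ... reclaim the mbuf associated with this descriptor ...
 *                txq->next_to_clean =
 *                    (txq->next_to_clean + 1) % txq->desc_count;
 *        }
 */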
/* Details of how to re-initialize depend on the networking stack */
void ice_request_stack_reinit(struct ice_softc *sc);

/* Details of how to check if the network stack is detaching us */
bool ice_driver_is_detaching(struct ice_softc *sc);

int ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending);
int ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num);
void ice_free_bar(device_t dev, struct ice_bar_info *bar);
void ice_set_ctrlq_len(struct ice_hw *hw);
void ice_release_vsi(struct ice_vsi *vsi);
struct ice_vsi *ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type);
int ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
                       const int max_rx_queues);
void ice_free_vsi_qmaps(struct ice_vsi *vsi);
int ice_initialize_vsi(struct ice_vsi *vsi);
void ice_deinit_vsi(struct ice_vsi *vsi);
uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi);
int ice_get_phy_type_low(uint64_t phy_type_low);
int ice_get_phy_type_high(uint64_t phy_type_high);
enum ice_status ice_add_media_types(struct ice_softc *sc, struct ifmedia *media);
void ice_configure_rxq_interrupts(struct ice_vsi *vsi);
void ice_configure_txq_interrupts(struct ice_vsi *vsi);
void ice_flush_rxq_interrupts(struct ice_vsi *vsi);
void ice_flush_txq_interrupts(struct ice_vsi *vsi);
int ice_cfg_vsi_for_tx(struct ice_vsi *vsi);
int ice_cfg_vsi_for_rx(struct ice_vsi *vsi);
int ice_control_rx_queues(struct ice_vsi *vsi, bool enable);
int ice_cfg_pf_default_mac_filters(struct ice_softc *sc);
int ice_rm_pf_default_mac_filters(struct ice_softc *sc);
void ice_print_nvm_version(struct ice_softc *sc);
void ice_update_vsi_hw_stats(struct ice_vsi *vsi);
void ice_reset_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_softc *sc);
void ice_reset_pf_stats(struct ice_softc *sc);
void ice_add_device_sysctls(struct ice_softc *sc);
void ice_log_hmc_error(struct ice_hw *hw, device_t dev);
void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
                               struct sysctl_oid *parent,
                               struct ice_eth_stats *stats);
void ice_add_vsi_sysctls(struct ice_vsi *vsi);
void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
                               struct sysctl_oid *parent,
                               struct ice_hw_port_stats *stats);
void ice_configure_misc_interrupts(struct ice_softc *sc);
int ice_sync_multicast_filters(struct ice_softc *sc);
enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent);
void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi);
void ice_add_device_tunables(struct ice_softc *sc);
int ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr);
int ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr);
int ice_vsi_disable_tx(struct ice_vsi *vsi);
void ice_vsi_add_txqs_ctx(struct ice_vsi *vsi);
void ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi);
void ice_vsi_del_txqs_ctx(struct ice_vsi *vsi);
void ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi);
void ice_add_txq_sysctls(struct ice_tx_queue *txq);
void ice_add_rxq_sysctls(struct ice_rx_queue *rxq);
int ice_config_rss(struct ice_vsi *vsi);
void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc);
void ice_load_pkg_file(struct ice_softc *sc);
void ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status);
uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter);
void ice_save_pci_info(struct ice_hw *hw, device_t dev);
int ice_replay_all_vsi_cfg(struct ice_softc *sc);
void ice_link_up_msg(struct ice_softc *sc);
int ice_update_laa_mac(struct ice_softc *sc);
void ice_get_and_print_bus_info(struct ice_softc *sc);
const char *ice_fec_str(enum ice_fec_mode mode);
const char *ice_fc_str(enum ice_fc_mode mode);
const char *ice_fwd_act_str(enum ice_sw_fwd_act_type action);
const char *ice_state_to_str(enum ice_state state);
int ice_init_link_events(struct ice_softc *sc);
void ice_configure_rx_itr(struct ice_vsi *vsi);
void ice_configure_tx_itr(struct ice_vsi *vsi);
void ice_setup_pf_vsi(struct ice_softc *sc);
void ice_handle_mdd_event(struct ice_softc *sc);
void ice_init_dcb_setup(struct ice_softc *sc);
int ice_send_version(struct ice_softc *sc);
int ice_cfg_pf_ethertype_filters(struct ice_softc *sc);
void ice_init_link_configuration(struct ice_softc *sc);
void ice_init_saved_phy_cfg(struct ice_softc *sc);
int ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings);
void ice_set_link_management_mode(struct ice_softc *sc);
int ice_module_event_handler(module_t mod, int what, void *arg);
int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req);
int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length);
int ice_alloc_intr_tracking(struct ice_softc *sc);
void ice_free_intr_tracking(struct ice_softc *sc);
void ice_set_default_local_lldp_mib(struct ice_softc *sc);
void ice_init_health_events(struct ice_softc *sc);

#endif /* _ICE_LIB_H_ */