/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_H_
#define _ICE_H_

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/linkmode.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
#include <linux/gnss.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include <net/gtp.h>
#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"
#include "ice_adapter.h"
#include "devlink/health.h"

#define ICE_BAR0		0
#define ICE_REQ_DESC_MULTIPLE	32
#define ICE_MIN_NUM_DESC	64
#define ICE_MAX_NUM_DESC	8160
#define ICE_DFLT_MIN_RX_DESC	512
#define ICE_DFLT_NUM_TX_DESC	256
#define ICE_DFLT_NUM_RX_DESC	2048

#define ICE_DFLT_TRAFFIC_CLASS	BIT(0)
#define ICE_INT_NAME_STR_LEN	(IFNAMSIZ + 16)
#define ICE_AQ_LEN		192
#define ICE_MBXSQ_LEN		64
#define ICE_SBQ_LEN		64
#define ICE_MIN_LAN_TXRX_MSIX	1
#define ICE_MIN_LAN_OICR_MSIX	1
#define ICE_MIN_MSIX		(ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX		2
#define ICE_RDMA_NUM_AEQ_MSIX	4
#define ICE_MIN_RDMA_MSIX	2
#define ICE_ESWITCH_MSIX	1
#define ICE_NO_VSI		0xffff
#define ICE_VSI_MAP_CONTIG	0
#define ICE_VSI_MAP_SCATTER	1
#define ICE_MAX_SCATTER_TXQS	16
#define ICE_MAX_SCATTER_RXQS	16
#define ICE_Q_WAIT_RETRY_LIMIT	10
#define ICE_Q_WAIT_MAX_RETRY	(5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS	256
#define ICE_INVAL_Q_INDEX	0xffff

#define ICE_MAX_RXQS_PER_TC	256	/* Used when setting VSI context per TC Rx queues */

#define ICE_CHNL_START_TC	1

#define ICE_MAX_RESET_WAIT	20

#define ICE_VSIQF_HKEY_ARRAY_SIZE	((VSIQF_HKEY_MAX_INDEX + 1) * 4)

#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define ICE_MAX_MTU	(ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)

#define ICE_MAX_TSO_SIZE 131072

#define ICE_UP_TABLE_TRANSLATE(val, i) \
	(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
	  ICE_AQ_VSI_UP_TABLE_UP##i##_M)
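/* For illustration only (not used by the driver): the macro pastes the UP
 * index into the field names, so ICE_UP_TABLE_TRANSLATE(3, 2) expands to
 * ((3 << ICE_AQ_VSI_UP_TABLE_UP2_S) & ICE_AQ_VSI_UP_TABLE_UP2_M), i.e. user
 * priority value 3 placed into the UP2 slot of the VSI UP translation table.
 */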

#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))

/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT 500
/* The user can specify BW in Kbit/Mbit/Gbit and the OS converts it to bytes.
 * Use it to convert a user-specified BW limit into Kbps.
 */
#define ICE_BW_KBPS_DIVISOR 125
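/* Worked example (illustrative only): the stack hands the driver a rate in
 * bytes per second, so 1 Gbit/s arrives as 125,000,000 B/s, and
 * 125000000 / ICE_BW_KBPS_DIVISOR == 1,000,000 Kbps, the unit the scheduler
 * BW-limit configuration expects (subject to ICE_MIN_BW_LIMIT).
 */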

/* Default recipes have priority 4 and below, hence priority values between 5..7
 * can be used as the filter priority for advanced switch filters (advanced
 * switch filters need a new recipe to be created for the specified extraction
 * sequence because the default recipe's extraction sequence does not cover
 * custom extractions)
 */
#define ICE_SWITCH_FLTR_PRIO_QUEUE	7
/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
 * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as
 * SYN/FIN/RST))
 */
#define ICE_SWITCH_FLTR_PRIO_RSVD	6
#define ICE_SWITCH_FLTR_PRIO_VSI	5
#define ICE_SWITCH_FLTR_PRIO_QGRP	ICE_SWITCH_FLTR_PRIO_VSI

/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

#define ice_for_each_xdp_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)

#define ice_for_each_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

#define ice_for_each_alloc_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)

#define ice_for_each_q_vector(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

#define ice_for_each_chnl_tc(i) \
	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)

#define ICE_UCAST_PROMISC_BITS	ICE_PROMISC_UCAST_RX

#define ICE_UCAST_VLAN_PROMISC_BITS	(ICE_PROMISC_UCAST_RX | \
					 ICE_PROMISC_VLAN_RX)

#define ICE_MCAST_PROMISC_BITS	(ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS	(ICE_PROMISC_MCAST_TX | \
					 ICE_PROMISC_MCAST_RX | \
					 ICE_PROMISC_VLAN_TX | \
					 ICE_PROMISC_VLAN_RX)

#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))

#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)

enum ice_feature {
	ICE_F_DSCP,
	ICE_F_PHY_RCLK,
	ICE_F_SMA_CTRL,
	ICE_F_CGU,
	ICE_F_GNSS,
	ICE_F_ROCE_LAG,
	ICE_F_SRIOV_LAG,
	ICE_F_MBX_LIMIT,
	ICE_F_MAX
};

DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);

struct ice_channel {
	struct list_head list;
	u8 type;
	u16 sw_id;
	u16 base_q;
	u16 num_rxq;
	u16 num_txq;
	u16 vsi_num;
	u8 ena_tc;
	struct ice_aqc_vsi_props info;
	u64 max_tx_rate;
	u64 min_tx_rate;
	atomic_t num_sb_fltr;
	struct ice_vsi *ch_vsi;
};

struct ice_txq_meta {
	u32 q_teid;	/* Tx-scheduler element identifier */
	u16 q_id;	/* Entry in VSI's txq_map bitmap */
	u16 q_handle;	/* Relative index of Tx queue within TC */
	u16 vsi_idx;	/* VSI index that Tx queue belongs to */
	u8 tc;		/* TC number that Tx queue belongs to */
};

struct ice_tc_info {
	u16 qoffset;
	u16 qcount_tx;
	u16 qcount_rx;
	u8 netdev_tc;
};

struct ice_tc_cfg {
	u8 numtc;	/* Total number of enabled TCs */
	u16 ena_tc;	/* Tx map */
	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};

struct ice_qs_cfg {
	struct mutex *qs_mutex;	/* will be assigned to &pf->avail_q_mutex */
	unsigned long *pf_map;
	unsigned long pf_map_size;
	unsigned int q_count;
	unsigned int scatter_count;
	u16 *vsi_map;
	u16 vsi_map_offset;
	u8 mapping_mode;
};

struct ice_sw {
	struct ice_pf *pf;
	u16 sw_id;		/* switch ID for this switch */
	u16 bridge_mode;	/* VEB/VEPA/Port Virtualizer */
};

enum ice_pf_state {
	ICE_TESTING,
	ICE_DOWN,
	ICE_NEEDS_RESTART,
	ICE_PREPARED_FOR_RESET,	/* set by driver when prepared */
	ICE_RESET_OICR_RECV,	/* set by driver after rcv reset OICR */
	ICE_PFR_REQ,		/* set by driver */
	ICE_CORER_REQ,		/* set by driver */
	ICE_GLOBR_REQ,		/* set by driver */
	ICE_CORER_RECV,		/* set by OICR handler */
	ICE_GLOBR_RECV,		/* set by OICR handler */
	ICE_EMPR_RECV,		/* set by OICR handler */
	ICE_SUSPENDED,		/* set on module remove path */
	ICE_RESET_FAILED,	/* set by reset/rebuild */
	/* When checking for the PF to be in a nominal operating state, only
	 * the bits grouped at the beginning of the list, i.e. those occurring
	 * before ICE_STATE_NOMINAL_CHECK_BITS, are checked. If you need a bit
	 * to be considered for the nominal operating state, it must be added
	 * before ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's
	 * position without appropriate consideration.
	 */
	ICE_STATE_NOMINAL_CHECK_BITS,
	ICE_ADMINQ_EVENT_PENDING,
	ICE_MAILBOXQ_EVENT_PENDING,
	ICE_SIDEBANDQ_EVENT_PENDING,
	ICE_MDD_EVENT_PENDING,
	ICE_VFLR_EVENT_PENDING,
	ICE_FLTR_OVERFLOW_PROMISC,
	ICE_VF_DIS,
	ICE_CFG_BUSY,
	ICE_SERVICE_SCHED,
	ICE_SERVICE_DIS,
	ICE_FD_FLUSH_REQ,
	ICE_OICR_INTR_DIS,		/* Global OICR interrupt disabled */
	ICE_MDD_VF_PRINT_PENDING,	/* set while an MDD event is being handled */
	ICE_VF_RESETS_DISABLED,		/* disable resets during ice_remove */
	ICE_LINK_DEFAULT_OVERRIDE_PENDING,
	ICE_PHY_INIT_COMPLETE,
	ICE_FD_VF_FLUSH_CTX,		/* set at FD Rx IRQ or timeout */
	ICE_AUX_ERR_PENDING,
	ICE_STATE_NBITS			/* must be last */
};

enum ice_vsi_state {
	ICE_VSI_DOWN,
	ICE_VSI_NEEDS_RESTART,
	ICE_VSI_NETDEV_ALLOCD,
	ICE_VSI_NETDEV_REGISTERED,
	ICE_VSI_UMAC_FLTR_CHANGED,
	ICE_VSI_MMAC_FLTR_CHANGED,
	ICE_VSI_PROMISC_CHANGED,
	ICE_VSI_REBUILD_PENDING,
	ICE_VSI_STATE_NBITS		/* must be last */
};

struct ice_vsi_stats {
	struct ice_ring_stats **tx_ring_stats;	/* Tx ring stats array */
	struct ice_ring_stats **rx_ring_stats;	/* Rx ring stats array */
};

/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
	struct net_device *netdev;
	struct ice_sw *vsw;		/* switch this VSI is on */
	struct ice_pf *back;		/* back pointer to PF */
	struct ice_rx_ring **rx_rings;	/* Rx ring array */
	struct ice_tx_ring **tx_rings;	/* Tx ring array */
	struct ice_q_vector **q_vectors; /* q_vector array */

	irqreturn_t (*irq_handler)(int irq, void *data);

	u64 tx_linearize;
	DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
	unsigned int current_netdev_flags;
	u32 tx_restart;
	u32 tx_busy;
	u32 rx_buf_failed;
	u32 rx_page_failed;
	u16 num_q_vectors;
	/* tell if only dynamic irq allocation is allowed */
	bool irq_dyn_alloc;

	u16 vsi_num;	/* HW (absolute) index of this VSI */
	u16 idx;	/* software index in pf->vsi[] */

	u16 num_gfltr;
	u16 num_bfltr;

	/* RSS config */
	u16 rss_table_size;	/* HW RSS table size */
	u16 rss_size;		/* Allocated RSS queues */
	u8 rss_hfunc;		/* User configured hash type */
	u8 *rss_hkey_user;	/* User configured hash keys */
	u8 *rss_lut_user;	/* User configured lookup table entries */
	u8 rss_lut_type;	/* used to configure Get/Set RSS LUT AQ call */

	/* aRFS members only allocated for the PF VSI */
#define ICE_MAX_ARFS_LIST	1024
#define ICE_ARFS_LST_MASK	(ICE_MAX_ARFS_LIST - 1)
	struct hlist_head *arfs_fltr_list;
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
	atomic_t *arfs_last_fltr_id;

	struct ice_aqc_vsi_props info;		/* VSI properties */
	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */

	/* VSI stats */
	struct rtnl_link_stats64 net_stats;
	struct rtnl_link_stats64 net_stats_prev;
	struct ice_eth_stats eth_stats;
	struct ice_eth_stats eth_stats_prev;

	struct list_head tmp_sync_list;		/* MAC filters to be synced */
	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */

	u8 irqs_ready:1;
	u8 current_isup:1;		/* Sync 'link up' logging */
	u8 stat_offsets_loaded:1;
	struct ice_vsi_vlan_ops inner_vlan_ops;
	struct ice_vsi_vlan_ops outer_vlan_ops;
	u16 num_vlan;

	/* queue information */
	u8 tx_mapping_mode;	/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u8 rx_mapping_mode;	/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u16 *txq_map;		/* index in pf->avail_txqs */
	u16 *rxq_map;		/* index in pf->avail_rxqs */
	u16 alloc_txq;		/* Allocated Tx queues */
	u16 num_txq;		/* Used Tx queues */
	u16 alloc_rxq;		/* Allocated Rx queues */
	u16 num_rxq;		/* Used Rx queues */
	u16 req_txq;		/* User requested Tx queues */
	u16 req_rxq;		/* User requested Rx queues */
	u16 num_rx_desc;
	u16 num_tx_desc;
	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
	struct ice_tc_cfg tc_cfg;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring **xdp_rings;	/* XDP ring array */
	u16 num_xdp_txq;		/* Used XDP queues */
	u8 xdp_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	struct mutex xdp_state_lock;

	struct net_device **target_netdevs;

	struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */

	/* Channel Specific Fields */
	struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
	u16 cnt_q_avail;
	u16 next_base_q;	/* next queue to be used for channel setup */
	struct list_head ch_list;
	u16 num_chnl_rxq;
	u16 num_chnl_txq;
	u16 ch_rss_size;
	u16 num_chnl_fltr;
	/* store away the RSS size info before configuring ADQ channels so that
	 * it can be used after tc-qdisc delete to restore the RSS settings as
	 * they were before
	 */
	u16 orig_rss_size;
	/* keeps track of all enabled TCs with and without DCB and inclusive of
	 * ADQ; vsi->mqprio_qopt keeps track of the queue information
	 */
	u8 all_numtc;
	u16 all_enatc;

	/* store away TC info, to be used for rebuild logic */
	u8 old_numtc;
	u16 old_ena_tc;

	/* back reference to the aggregator node this VSI corresponds to */
	struct ice_agg_node *agg_node;

	struct_group_tagged(ice_vsi_cfg_params, params,
		struct ice_port_info *port_info; /* back pointer to port_info */
		struct ice_channel *ch; /* VSI's channel structure, may be NULL */
		union {
			/* VF associated with this VSI, may be NULL */
			struct ice_vf *vf;
			/* SF associated with this VSI, may be NULL */
			struct ice_dynamic_port *sf;
		};
		u32 flags; /* VSI flags used for rebuild and configuration */
		enum ice_vsi_type type; /* the type of the VSI */
	);
} ____cacheline_internodealigned_in_smp;

/* struct that defines an interrupt vector */
struct ice_q_vector {
	struct ice_vsi *vsi;

	u16 v_idx;		/* index in the vsi->q_vector array */
	u16 reg_idx;		/* PF relative register index */
	u8 num_ring_rx;		/* total number of Rx rings in vector */
	u8 num_ring_tx;		/* total number of Tx rings in vector */
	u8 wb_on_itr:1;		/* if true, WB on ITR is enabled */
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
	 */
	u8 intrl;

	struct napi_struct napi;

	struct ice_ring_container rx;
	struct ice_ring_container tx;

	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;

	struct ice_channel *ch;

	char name[ICE_INT_NAME_STR_LEN];

	u16 total_events;	/* net_dim(): number of interrupts processed */
	u16 vf_reg_idx;		/* VF relative register index */
	struct msi_map irq;
} ____cacheline_internodealigned_in_smp;

enum ice_pf_flags {
	ICE_FLAG_FLTR_SYNC,
	ICE_FLAG_RDMA_ENA,
	ICE_FLAG_RSS_ENA,
	ICE_FLAG_SRIOV_ENA,
	ICE_FLAG_SRIOV_CAPABLE,
	ICE_FLAG_DCB_CAPABLE,
	ICE_FLAG_DCB_ENA,
	ICE_FLAG_FD_ENA,
	ICE_FLAG_PTP_SUPPORTED,		/* PTP is supported by NVM */
	ICE_FLAG_ADV_FEATURES,
	ICE_FLAG_TC_MQPRIO,		/* support for Multi queue TC */
	ICE_FLAG_CLS_FLOWER,
	ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
	ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
	ICE_FLAG_NO_MEDIA,
	ICE_FLAG_FW_LLDP_AGENT,
	ICE_FLAG_MOD_POWER_UNSUPPORTED,
	ICE_FLAG_PHY_FW_LOAD_FAILED,
	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
	ICE_FLAG_LEGACY_RX,
	ICE_FLAG_VF_TRUE_PROMISC_ENA,
	ICE_FLAG_MDD_AUTO_RESET_VF,
	ICE_FLAG_VF_VLAN_PRUNING,
	ICE_FLAG_LINK_LENIENT_MODE_ENA,
	ICE_FLAG_PLUG_AUX_DEV,
	ICE_FLAG_UNPLUG_AUX_DEV,
	ICE_FLAG_MTU_CHANGED,
	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
	ICE_FLAG_DPLL,			/* SyncE/PTP dplls initialized */
	ICE_PF_FLAGS_NBITS		/* must be last */
};

enum ice_misc_thread_tasks {
	ICE_MISC_THREAD_TX_TSTAMP,
	ICE_MISC_THREAD_NBITS		/* must be last */
};

struct ice_eswitch {
	struct ice_vsi *uplink_vsi;
	struct ice_esw_br_offloads *br_offloads;
	struct xarray reprs;
	bool is_running;
};

struct ice_agg_node {
	u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE	64
	u32 num_vsis;
	u8 valid;
};

struct ice_pf_msix {
	u32 cur;
	u32 min;
	u32 max;
};

struct ice_pf {
	struct pci_dev *pdev;
	struct ice_adapter *adapter;

	struct devlink_region *nvm_region;
	struct devlink_region *sram_region;
	struct devlink_region *devcaps_region;

	/* devlink port data */
	struct devlink_port devlink_port;

	/* OS reserved IRQ details */
	struct msix_entry *msix_entries;
	struct ice_irq_tracker irq_tracker;
	/* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
	 * number of MSIX vectors needed for all SR-IOV VFs from the number of
	 * MSIX vectors allowed on this PF.
	 */
	u16 sriov_base_vector;
	unsigned long *sriov_irq_bm;	/* bitmap to track irq usage */
	u16 sriov_irq_size;		/* size of the irq_bm bitmap */

	u16 ctrl_vsi_idx;		/* control VSI index in pf->vsi array */

	struct ice_vsi **vsi;		/* VSIs created by the driver */
	struct ice_vsi_stats **vsi_stats;
	struct ice_sw *first_sw;	/* first switch created by firmware */
	u16 eswitch_mode;		/* current mode of eswitch */
	struct dentry *ice_debugfs_pf;
	struct dentry *ice_debugfs_pf_fwlog;
	/* keep track of all the dentrys for FW log modules */
	struct dentry **ice_debugfs_pf_fwlog_modules;
	struct ice_vfs vfs;
	DECLARE_BITMAP(features, ICE_F_MAX);
	DECLARE_BITMAP(state, ICE_STATE_NBITS);
	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
	DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
	unsigned long serv_tmr_period;
	unsigned long serv_tmr_prev;
	struct timer_list serv_tmr;
	struct work_struct serv_task;
	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
	struct mutex tc_mutex;		/* lock to protect TC changes */
	struct mutex adev_mutex;	/* lock to protect aux device access */
	struct mutex lag_mutex;		/* protect ice_lag struct in PF */
	u32 msg_enable;
	struct ice_ptp ptp;
	struct gnss_serial *gnss_serial;
	struct gnss_device *gnss_dev;
	u16 num_rdma_msix;		/* Total MSIX vectors for RDMA driver */
	u16 rdma_base_vector;

	/* spinlock to protect the AdminQ wait list */
	spinlock_t aq_wait_lock;
	struct hlist_head aq_wait_list;
	wait_queue_head_t aq_wait_queue;
	bool fw_emp_reset_disabled;

	wait_queue_head_t reset_wait_queue;

	u32 hw_csum_rx_error;
	u32 hw_rx_eipe_error;
	u32 oicr_err_reg;
	struct msi_map oicr_irq;	/* Other interrupt cause MSIX vector */
	struct msi_map ll_ts_irq;	/* LL_TS interrupt MSIX vector */
	u16 max_pf_txqs;		/* Total Tx queues PF wide */
	u16 max_pf_rxqs;		/* Total Rx queues PF wide */
	struct ice_pf_msix msix;
	u16 num_lan_msix;		/* Total MSIX vectors for base driver */
	u16 num_lan_tx;			/* num LAN Tx queues setup */
	u16 num_lan_rx;			/* num LAN Rx queues setup */
	u16 next_vsi;			/* Next free slot in pf->vsi[] - 0-based! */
	u16 num_alloc_vsi;
	u16 corer_count;		/* Core reset count */
	u16 globr_count;		/* Global reset count */
	u16 empr_count;			/* EMP reset count */
	u16 pfr_count;			/* PF reset count */

	u8 wol_ena : 1;			/* software state of WoL */
	u32 wakeup_reason;		/* last wakeup reason */
	struct ice_hw_port_stats stats;
	struct ice_hw_port_stats stats_prev;
	struct ice_hw hw;
	u8 stat_prev_loaded:1;		/* has previous stats been loaded */
	u8 rdma_mode;
	u16 dcbx_cap;
	u32 tx_timeout_count;
	unsigned long tx_timeout_last_recovery;
	u32 tx_timeout_recovery_level;
	char int_name[ICE_INT_NAME_STR_LEN];
	char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
	struct auxiliary_device *adev;
	int aux_idx;
	u32 sw_int_count;
	/* count of tc_flower filters specific to channel (aka where filter
	 * action is "hw_tc <tc_num>")
	 */
	u16 num_dmac_chnl_fltrs;
	struct hlist_head tc_flower_fltr_list;

	u64 supported_rxdids;

	__le64 nvm_phy_type_lo;		/* NVM PHY type low */
	__le64 nvm_phy_type_hi;		/* NVM PHY type high */
	struct ice_link_default_override_tlv link_dflt_override;
	struct ice_lag *lag;		/* Link Aggregation information */

	struct ice_eswitch eswitch;
	struct ice_esw_br_port *br_port;

	struct xarray dyn_ports;
	struct xarray sf_nums;

#define ICE_INVALID_AGG_NODE_ID		0
#define ICE_PF_AGG_NODE_ID_START	1
#define ICE_MAX_PF_AGG_NODES		32
	struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
#define ICE_VF_AGG_NODE_ID_START	65
#define ICE_MAX_VF_AGG_NODES		32
	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
	struct ice_dplls dplls;
	struct device *hwmon_dev;
	struct ice_health health_reporters;

	u8 num_quanta_prof_used;
};

extern struct workqueue_struct *ice_lag_wq;

struct ice_netdev_priv {
	struct ice_vsi *vsi;
	struct ice_repr *repr;
	/* indirect block callbacks on registered higher level devices
	 * (e.g. tunnel devices)
	 *
	 * tc_indr_block_cb_priv_list is used to look up indirect callback
	 * private data
	 */
	struct list_head tc_indr_block_priv_list;
};

/**
 * ice_vector_ch_enabled
 * @qv: pointer to q_vector, can be NULL
 *
 * Return true if the vector is channel enabled, otherwise false.
 */
static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
{
	return !!qv->ch; /* Enable it to run with TC */
}

/**
 * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
 * @pf: Board private structure
 *
 * Return true if this PF should respond to the Tx timestamp interrupt
 * indication in the miscellaneous OICR interrupt handler.
 */
static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
{
	return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
}

/**
 * ice_irq_dynamic_ena - Enable default interrupt generation settings
 * @hw: pointer to HW struct
 * @vsi: pointer to VSI struct, can be NULL
 * @q_vector: pointer to q_vector, can be NULL
 */
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
		    struct ice_q_vector *q_vector)
{
	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
				((struct ice_pf *)hw->back)->oicr_irq.index;
	int itr = ICE_ITR_NONE;
	u32 val;

	/* clear the PBA here, as this function is meant to clean out all
	 * previous interrupts and enable the interrupt
	 */
	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      (itr << GLINT_DYN_CTL_ITR_INDX_S);
	if (vsi)
		if (test_bit(ICE_VSI_DOWN, vsi->state))
			return;
	wr32(hw, GLINT_DYN_CTL(vector), val);
}

/**
 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
 * @netdev: pointer to the netdev struct
 */
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->back;
}

static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
	return !!READ_ONCE(vsi->xdp_prog);
}

static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
	ring->flags |= ICE_TX_FLAGS_RING_XDP;
}

/**
 * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
 * attached and configured as zero-copy, NULL otherwise.
 */
static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
							u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!ice_is_xdp_ena_vsi(vsi))
		return NULL;

	return (pool && pool->dev) ? pool : NULL;
}

/**
 * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
 * @ring: Rx ring to use
 *
 * Sets XSK buff pool pointer on Rx ring.
 */
static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index;

	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}

/**
 * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Sets XSK buff pool pointer on XDP ring.
 *
 * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
 * queue id. Reason for doing so is that queue vectors might have assigned more
 * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
 * carries a pointer to one of these XDP rings for its own purposes, such as
 * handling XDP_TX action, therefore we can piggyback here on the
 * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
 */
static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
	struct ice_tx_ring *ring;

	ring = vsi->rx_rings[qid]->xdp_ring;
	if (!ring)
		return;

	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}

/**
 * ice_get_main_vsi - Get the PF VSI
 * @pf: PF instance
 *
 * returns pf->vsi[0], which by definition is the PF VSI
 */
static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
{
	if (pf->vsi)
		return pf->vsi[0];

	return NULL;
}

/**
 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
 * @np: private netdev structure
 */
static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
{
	/* In case of port representor return source port VSI. */
	if (np->repr)
		return np->repr->src_vsi;
	else
		return np->vsi;
}

/**
 * ice_get_ctrl_vsi - Get the control VSI
 * @pf: PF instance
 */
static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
{
	/* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
	if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
		return NULL;

	return pf->vsi[pf->ctrl_vsi_idx];
}

/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
			return pf->vsi[i];
	return NULL;
}

/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise.
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
	return pf->eswitch.is_running;
}

#define ICE_FD_STAT_CTR_BLOCK_COUNT	256
#define ICE_FD_STAT_PF_IDX(base_idx) \
			((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx)	ICE_FD_STAT_PF_IDX(base_idx)
#define ICE_FD_STAT_CH			1
#define ICE_FD_CH_STAT_IDX(base_idx) \
			(ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
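/* Worked example (values purely illustrative): with a hypothetical base_idx
 * of 2, ICE_FD_STAT_PF_IDX(2) == 512, so ICE_FD_SB_STAT_IDX(2) == 512 and
 * ICE_FD_CH_STAT_IDX(2) == 513; each function owns a block of
 * ICE_FD_STAT_CTR_BLOCK_COUNT counters, with the ADQ/channel counter placed
 * ICE_FD_STAT_CH entries after the sideband counter.
 */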

/**
 * ice_is_adq_active - any active ADQs
 * @pf: pointer to PF
 *
 * Returns true if any ADQs are configured on the main VSI (i.e. its numtc is
 * above ICE_CHNL_START_TC and the TC_MQPRIO flag is set), false otherwise.
 */
static inline bool ice_is_adq_active(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	/* is ADQ configured */
	if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		return true;

	return false;
}

void ice_debugfs_fwlog_init(struct ice_pf *pf);
void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);

bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
void ice_set_ethtool_sf_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
			     struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);

enum ice_xdp_cfg {
	ICE_XDP_CFG_FULL,	/* Fully apply new config in .ndo_bpf() */
	ICE_XDP_CFG_PART,	/* Save/use part of config in VSI rebuild */
};

int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
			  enum ice_xdp_cfg cfg_type);
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun);
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs);
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);

enum ice_aq_task_state {
	ICE_AQ_TASK_NOT_PREPARED,
	ICE_AQ_TASK_WAITING,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;
	struct ice_rq_event_info event;
	enum ice_aq_task_state state;
	u16 opcode;
};

void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode);
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout);
int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
int ice_change_mtu(struct net_device *netdev, int new_mtu);
void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
void ice_set_netdev_features(struct net_device *netdev);
int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
void ice_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *stats);

/**
 * ice_set_rdma_cap - enable RDMA support
 * @pf: PF struct
 */
static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
	if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
		set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	}
}

/**
 * ice_clear_rdma_cap - disable RDMA support
 * @pf: PF struct
 */
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
	/* defer unplug to service task to avoid RTNL lock and
	 * clear PLUG bit so that pending plugs don't interfere
	 */
	clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}

static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
{
	return hw->ptp.phy_model;
}

extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */