/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_H_
#define _ICE_H_

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/linkmode.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
#include <linux/gnss.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include <net/gtp.h>
#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"
#include "ice_adapter.h"
#include "devlink/health.h"

#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
#define ICE_MAX_NUM_DESC 8160
#define ICE_DFLT_MIN_RX_DESC 512
#define ICE_DFLT_NUM_TX_DESC 256
#define ICE_DFLT_NUM_RX_DESC 2048
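
/* Illustrative sketch (not part of this header, added for clarity): a
 * requested descriptor count is typically clamped to
 * [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC] and rounded up to a multiple of
 * ICE_REQ_DESC_MULTIPLE before it is used; "req" below is a hypothetical
 * user-requested ring size:
 *
 *	u16 ring_size = clamp_t(u16, req, ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
 *
 *	ring_size = ALIGN(ring_size, ICE_REQ_DESC_MULTIPLE);
 */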

#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 192
#define ICE_MBXSQ_LEN 64
#define ICE_SBQ_LEN 64
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
#define ICE_MAX_SCATTER_TXQS 16
#define ICE_MAX_SCATTER_RXQS 16
#define ICE_Q_WAIT_RETRY_LIMIT 10
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
#define ICE_INVAL_Q_INDEX 0xffff

#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */

#define ICE_CHNL_START_TC 1

#define ICE_MAX_RESET_WAIT 20

#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)

#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)

#define ICE_MAX_TSO_SIZE 131072

#define ICE_UP_TABLE_TRANSLATE(val, i) \
	(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
	 ICE_AQ_VSI_UP_TABLE_UP##i##_M)

#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))

/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT 500
/* User can specify BW in either Kbit/Mbit/Gbit and the OS converts it to
 * bytes. Use it to convert the user-specified BW limit into Kbps.
 */
#define ICE_BW_KBPS_DIVISOR 125
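
/* For illustration only (not part of this header): given a rate in bytes
 * per second from the stack, dividing by ICE_BW_KBPS_DIVISOR (125 bytes/s
 * per Kbit/s) yields Kbps, which is then held to the scheduler minimum;
 * "rate_bytes_per_sec" is a hypothetical input:
 *
 *	u64 rate_kbps = div_u64(rate_bytes_per_sec, ICE_BW_KBPS_DIVISOR);
 *
 *	if (rate_kbps < ICE_MIN_BW_LIMIT)
 *		rate_kbps = ICE_MIN_BW_LIMIT;
 */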

/* Default recipes have priority 4 and below, hence priority values between 5..7
 * can be used as filter priority for advanced switch filter (advanced switch
 * filters need new recipe to be created for specified extraction sequence
 * because default recipe extraction sequence does not represent custom
 * extraction)
 */
#define ICE_SWITCH_FLTR_PRIO_QUEUE 7
/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
 * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as
 * SYN/FIN/RST))
 */
#define ICE_SWITCH_FLTR_PRIO_RSVD 6
#define ICE_SWITCH_FLTR_PRIO_VSI 5
#define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI

/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

#define ice_for_each_xdp_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)

#define ice_for_each_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

#define ice_for_each_alloc_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)

#define ice_for_each_q_vector(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

#define ice_for_each_chnl_tc(i) \
	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)

#define ICE_UCAST_PROMISC_BITS ICE_PROMISC_UCAST_RX

#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_RX | \
				     ICE_PROMISC_VLAN_RX)

#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
				     ICE_PROMISC_MCAST_RX | \
				     ICE_PROMISC_VLAN_TX | \
				     ICE_PROMISC_VLAN_RX)

#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))

#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
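
/* Usage sketch (illustrative only, not part of this header): the iterator
 * macros above walk the PF/VSI arrays by index, and ice_pf_to_dev() is the
 * usual handle for device logging, e.g.:
 *
 *	struct ice_vsi *vsi;
 *	int v, q;
 *
 *	ice_for_each_vsi(pf, v) {
 *		vsi = pf->vsi[v];
 *		if (!vsi)
 *			continue;
 *		ice_for_each_txq(vsi, q)
 *			dev_dbg(ice_pf_to_dev(pf), "VSI %u txq %d\n",
 *				vsi->vsi_num, q);
 *	}
 */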

enum ice_feature {
	ICE_F_DSCP,
	ICE_F_PHY_RCLK,
	ICE_F_SMA_CTRL,
	ICE_F_CGU,
	ICE_F_GNSS,
	ICE_F_GCS,
	ICE_F_ROCE_LAG,
	ICE_F_SRIOV_LAG,
	ICE_F_MBX_LIMIT,
	ICE_F_MAX
};

DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);

struct ice_channel {
	struct list_head list;
	u8 type;
	u16 sw_id;
	u16 base_q;
	u16 num_rxq;
	u16 num_txq;
	u16 vsi_num;
	u8 ena_tc;
	struct ice_aqc_vsi_props info;
	u64 max_tx_rate;
	u64 min_tx_rate;
	atomic_t num_sb_fltr;
	struct ice_vsi *ch_vsi;
};

struct ice_txq_meta {
	u32 q_teid;	/* Tx-scheduler element identifier */
	u16 q_id;	/* Entry in VSI's txq_map bitmap */
	u16 q_handle;	/* Relative index of Tx queue within TC */
	u16 vsi_idx;	/* VSI index that Tx queue belongs to */
	u8 tc;		/* TC number that Tx queue belongs to */
};

struct ice_tc_info {
	u16 qoffset;
	u16 qcount_tx;
	u16 qcount_rx;
	u8 netdev_tc;
};

struct ice_tc_cfg {
	u8 numtc;	/* Total number of enabled TCs */
	u16 ena_tc;	/* Tx map */
	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};

struct ice_qs_cfg {
	struct mutex *qs_mutex;	/* will be assigned to &pf->avail_q_mutex */
	unsigned long *pf_map;
	unsigned long pf_map_size;
	unsigned int q_count;
	unsigned int scatter_count;
	u16 *vsi_map;
	u16 vsi_map_offset;
	u8 mapping_mode;
};

struct ice_sw {
	struct ice_pf *pf;
	u16 sw_id;		/* switch ID for this switch */
	u16 bridge_mode;	/* VEB/VEPA/Port Virtualizer */
};

enum ice_pf_state {
	ICE_TESTING,
	ICE_DOWN,
	ICE_NEEDS_RESTART,
	ICE_PREPARED_FOR_RESET,	/* set by driver when prepared */
	ICE_RESET_OICR_RECV,	/* set by driver after rcv reset OICR */
	ICE_PFR_REQ,		/* set by driver */
	ICE_CORER_REQ,		/* set by driver */
	ICE_GLOBR_REQ,		/* set by driver */
	ICE_CORER_RECV,		/* set by OICR handler */
	ICE_GLOBR_RECV,		/* set by OICR handler */
	ICE_EMPR_RECV,		/* set by OICR handler */
	ICE_SUSPENDED,		/* set on module remove path */
	ICE_RESET_FAILED,	/* set by reset/rebuild */
	/* When checking for the PF to be in a nominal operating state, the
	 * bits that are grouped at the beginning of the list need to be
	 * checked. Bits occurring before ICE_STATE_NOMINAL_CHECK_BITS will
	 * be checked. If you need to add a bit into consideration for nominal
	 * operating state, it must be added before
	 * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
	 * without appropriate consideration.
	 */
	ICE_STATE_NOMINAL_CHECK_BITS,
	ICE_ADMINQ_EVENT_PENDING,
	ICE_MAILBOXQ_EVENT_PENDING,
	ICE_SIDEBANDQ_EVENT_PENDING,
	ICE_MDD_EVENT_PENDING,
	ICE_VFLR_EVENT_PENDING,
	ICE_FLTR_OVERFLOW_PROMISC,
	ICE_VF_DIS,
	ICE_CFG_BUSY,
	ICE_SERVICE_SCHED,
	ICE_SERVICE_DIS,
	ICE_FD_FLUSH_REQ,
	ICE_OICR_INTR_DIS,		/* Global OICR interrupt disabled */
	ICE_MDD_VF_PRINT_PENDING,	/* set when MDD event handle */
	ICE_VF_RESETS_DISABLED,		/* disable resets during ice_remove */
	ICE_LINK_DEFAULT_OVERRIDE_PENDING,
	ICE_PHY_INIT_COMPLETE,
	ICE_FD_VF_FLUSH_CTX,		/* set at FD Rx IRQ or timeout */
	ICE_AUX_ERR_PENDING,
	ICE_STATE_NBITS			/* must be last */
};
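
/* Illustrative sketch (not part of this header): a "nominal state" check
 * only considers the bits declared before ICE_STATE_NOMINAL_CHECK_BITS,
 * which is why new blocking bits must be added before that entry, e.g.:
 *
 *	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
 *
 *	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
 *	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
 *		return false;
 *
 * i.e. the PF is not in a nominal state if any of those early bits is set.
 */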

enum ice_vsi_state {
	ICE_VSI_DOWN,
	ICE_VSI_NEEDS_RESTART,
	ICE_VSI_NETDEV_ALLOCD,
	ICE_VSI_NETDEV_REGISTERED,
	ICE_VSI_UMAC_FLTR_CHANGED,
	ICE_VSI_MMAC_FLTR_CHANGED,
	ICE_VSI_PROMISC_CHANGED,
	ICE_VSI_REBUILD_PENDING,
	ICE_VSI_STATE_NBITS	/* must be last */
};

struct ice_vsi_stats {
	struct ice_ring_stats **tx_ring_stats;	/* Tx ring stats array */
	struct ice_ring_stats **rx_ring_stats;	/* Rx ring stats array */
};

/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
	struct net_device *netdev;
	struct ice_sw *vsw;		 /* switch this VSI is on */
	struct ice_pf *back;		 /* back pointer to PF */
	struct ice_rx_ring **rx_rings;	 /* Rx ring array */
	struct ice_tx_ring **tx_rings;	 /* Tx ring array */
	struct ice_q_vector **q_vectors; /* q_vector array */

	irqreturn_t (*irq_handler)(int irq, void *data);

	u64 tx_linearize;
	DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
	unsigned int current_netdev_flags;
	u32 tx_restart;
	u32 tx_busy;
	u32 rx_buf_failed;
	u32 rx_page_failed;
	u16 num_q_vectors;
	/* tell if only dynamic irq allocation is allowed */
	bool irq_dyn_alloc;

	u16 vsi_num;	/* HW (absolute) index of this VSI */
	u16 idx;	/* software index in pf->vsi[] */

	u16 num_gfltr;
	u16 num_bfltr;

	/* RSS config */
	u16 rss_table_size;	/* HW RSS table size */
	u16 rss_size;		/* Allocated RSS queues */
	u8 rss_hfunc;		/* User configured hash type */
	u8 *rss_hkey_user;	/* User configured hash keys */
	u8 *rss_lut_user;	/* User configured lookup table entries */
	u8 rss_lut_type;	/* used to configure Get/Set RSS LUT AQ call */

	/* aRFS members only allocated for the PF VSI */
#define ICE_MAX_ARFS_LIST 1024
#define ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1)
	struct hlist_head *arfs_fltr_list;
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
	atomic_t *arfs_last_fltr_id;

	struct ice_aqc_vsi_props info;		/* VSI properties */
	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */

	/* VSI stats */
	struct rtnl_link_stats64 net_stats;
	struct rtnl_link_stats64 net_stats_prev;
	struct ice_eth_stats eth_stats;
	struct ice_eth_stats eth_stats_prev;

	struct list_head tmp_sync_list;		/* MAC filters to be synced */
	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */

	u8 irqs_ready:1;
	u8 current_isup:1;	/* Sync 'link up' logging */
	u8 stat_offsets_loaded:1;
	struct ice_vsi_vlan_ops inner_vlan_ops;
	struct ice_vsi_vlan_ops outer_vlan_ops;
	u16 num_vlan;

	/* queue information */
	u8 tx_mapping_mode;	/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u8 rx_mapping_mode;	/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u16 *txq_map;		/* index in pf->avail_txqs */
	u16 *rxq_map;		/* index in pf->avail_rxqs */
	u16 alloc_txq;		/* Allocated Tx queues */
	u16 num_txq;		/* Used Tx queues */
	u16 alloc_rxq;		/* Allocated Rx queues */
	u16 num_rxq;		/* Used Rx queues */
	u16 req_txq;		/* User requested Tx queues */
	u16 req_rxq;		/* User requested Rx queues */
	u16 num_rx_desc;
	u16 num_tx_desc;
	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
	struct ice_tc_cfg tc_cfg;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring **xdp_rings;	/* XDP ring array */
	u16 num_xdp_txq;		/* Used XDP queues */
	u8 xdp_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
	struct mutex xdp_state_lock;

	struct net_device **target_netdevs;

	struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */

	/* Channel Specific Fields */
	struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
	u16 cnt_q_avail;
	u16 next_base_q;	/* next queue to be used for channel setup */
	struct list_head ch_list;
	u16 num_chnl_rxq;
	u16 num_chnl_txq;
	u16 ch_rss_size;
	u16 num_chnl_fltr;
	/* store away RSS size info before configuring ADQ channels so that
	 * it can be used after tc-qdisc delete, to restore RSS settings as
	 * they were before
	 */
	u16 orig_rss_size;
	/* this keeps track of all enabled TCs with and without DCB
	 * and inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
	 * information
	 */
	u8 all_numtc;
	u16 all_enatc;

	/* store away TC info, to be used for rebuild logic */
	u8 old_numtc;
	u16 old_ena_tc;

	/* setup back reference, to which aggregator node this VSI
	 * corresponds to
	 */
	struct ice_agg_node *agg_node;

	struct_group_tagged(ice_vsi_cfg_params, params,
		struct ice_port_info *port_info; /* back pointer to port_info */
		struct ice_channel *ch; /* VSI's channel structure, may be NULL */
		union {
			/* VF associated with this VSI, may be NULL */
			struct ice_vf *vf;
			/* SF associated with this VSI, may be NULL */
			struct ice_dynamic_port *sf;
		};
		u32 flags; /* VSI flags used for rebuild and configuration */
		enum ice_vsi_type type; /* the type of the VSI */
	);
} ____cacheline_internodealigned_in_smp;
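
/* Illustrative note (not part of this header): struct_group_tagged() above
 * also emits a standalone struct ice_vsi_cfg_params mirroring the grouped
 * members, so the configuration parameters can be saved and restored as a
 * single unit, e.g. around a rebuild:
 *
 *	struct ice_vsi_cfg_params params = vsi->params;
 *
 *	(tear the VSI down, then later:)
 *	vsi->params = params;
 */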

/* struct that defines an interrupt vector */
struct ice_q_vector {
	struct ice_vsi *vsi;

	u16 v_idx;		/* index in the vsi->q_vector array. */
	u16 reg_idx;		/* PF relative register index */
	u8 num_ring_rx;		/* total number of Rx rings in vector */
	u8 num_ring_tx;		/* total number of Tx rings in vector */
	u8 wb_on_itr:1;		/* if true, WB on ITR is enabled */
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
	 */
	u8 intrl;

	struct napi_struct napi;

	struct ice_ring_container rx;
	struct ice_ring_container tx;

	struct ice_channel *ch;

	char name[ICE_INT_NAME_STR_LEN];

	u16 total_events;	/* net_dim(): number of interrupts processed */
	u16 vf_reg_idx;		/* VF relative register index */
	struct msi_map irq;
} ____cacheline_internodealigned_in_smp;

enum ice_pf_flags {
	ICE_FLAG_FLTR_SYNC,
	ICE_FLAG_RDMA_ENA,
	ICE_FLAG_RSS_ENA,
	ICE_FLAG_SRIOV_ENA,
	ICE_FLAG_SRIOV_CAPABLE,
	ICE_FLAG_DCB_CAPABLE,
	ICE_FLAG_DCB_ENA,
	ICE_FLAG_FD_ENA,
	ICE_FLAG_PTP_SUPPORTED,		/* PTP is supported by NVM */
	ICE_FLAG_ADV_FEATURES,
	ICE_FLAG_TC_MQPRIO,		/* support for Multi queue TC */
	ICE_FLAG_CLS_FLOWER,
	ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
	ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
	ICE_FLAG_NO_MEDIA,
	ICE_FLAG_FW_LLDP_AGENT,
	ICE_FLAG_MOD_POWER_UNSUPPORTED,
	ICE_FLAG_PHY_FW_LOAD_FAILED,
	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
	ICE_FLAG_LEGACY_RX,
	ICE_FLAG_VF_TRUE_PROMISC_ENA,
	ICE_FLAG_MDD_AUTO_RESET_VF,
	ICE_FLAG_VF_VLAN_PRUNING,
	ICE_FLAG_LINK_LENIENT_MODE_ENA,
	ICE_FLAG_PLUG_AUX_DEV,
	ICE_FLAG_UNPLUG_AUX_DEV,
	ICE_FLAG_MTU_CHANGED,
	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
	ICE_FLAG_DPLL,			/* SyncE/PTP dplls initialized */
	ICE_PF_FLAGS_NBITS		/* must be last */
};
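
/* Usage note (illustrative, not part of this header): these values index the
 * DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS) member of struct ice_pf and are
 * manipulated with the standard bitmap helpers, e.g.:
 *
 *	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
 *		set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
 */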

enum ice_misc_thread_tasks {
	ICE_MISC_THREAD_TX_TSTAMP,
	ICE_MISC_THREAD_NBITS		/* must be last */
};

struct ice_eswitch {
	struct ice_vsi *uplink_vsi;
	struct ice_esw_br_offloads *br_offloads;
	struct xarray reprs;
	bool is_running;
};

struct ice_agg_node {
	u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE 64
	u32 num_vsis;
	u8 valid;
};

struct ice_pf_msix {
	u32 cur;
	u32 min;
	u32 max;
	u32 total;
	u32 rest;
};

struct ice_pf {
	struct pci_dev *pdev;
	struct ice_adapter *adapter;

	struct devlink_region *nvm_region;
	struct devlink_region *sram_region;
	struct devlink_region *devcaps_region;

	/* devlink port data */
	struct devlink_port devlink_port;

	/* OS reserved IRQ details */
	struct msix_entry *msix_entries;
	struct ice_irq_tracker irq_tracker;
	struct ice_virt_irq_tracker virt_irq_tracker;

	u16 ctrl_vsi_idx;	/* control VSI index in pf->vsi array */

	struct ice_vsi **vsi;	/* VSIs created by the driver */
	struct ice_vsi_stats **vsi_stats;
	struct ice_sw *first_sw;	/* first switch created by firmware */
	u16 eswitch_mode;		/* current mode of eswitch */
	struct dentry *ice_debugfs_pf;
	struct dentry *ice_debugfs_pf_fwlog;
	/* keep track of all the dentries for FW log modules */
	struct dentry **ice_debugfs_pf_fwlog_modules;
	struct ice_vfs vfs;
	DECLARE_BITMAP(features, ICE_F_MAX);
	DECLARE_BITMAP(state, ICE_STATE_NBITS);
	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
	DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
	unsigned long serv_tmr_period;
	unsigned long serv_tmr_prev;
	struct timer_list serv_tmr;
	struct work_struct serv_task;
	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
	struct mutex tc_mutex;		/* lock to protect TC changes */
	struct mutex adev_mutex;	/* lock to protect aux device access */
	struct mutex lag_mutex;		/* protect ice_lag struct in PF */
	u32 msg_enable;
	struct ice_ptp ptp;
	struct gnss_serial *gnss_serial;
	struct gnss_device *gnss_dev;
	u16 num_rdma_msix;	/* Total MSIX vectors for RDMA driver */
	u16 rdma_base_vector;

	/* spinlock to protect the AdminQ wait list */
	spinlock_t aq_wait_lock;
	struct hlist_head aq_wait_list;
	wait_queue_head_t aq_wait_queue;
	bool fw_emp_reset_disabled;

	wait_queue_head_t reset_wait_queue;

	u32 hw_csum_rx_error;
	u32 hw_rx_eipe_error;
	u32 oicr_err_reg;
	struct msi_map oicr_irq;	/* Other interrupt cause MSIX vector */
	struct msi_map ll_ts_irq;	/* LL_TS interrupt MSIX vector */
	u16 max_pf_txqs;	/* Total Tx queues PF wide */
	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
	struct ice_pf_msix msix;
	u16 num_lan_tx;		/* num LAN Tx queues setup */
	u16 num_lan_rx;		/* num LAN Rx queues setup */
	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
	u16 num_alloc_vsi;
	u16 corer_count;	/* Core reset count */
	u16 globr_count;	/* Global reset count */
	u16 empr_count;		/* EMP reset count */
	u16 pfr_count;		/* PF reset count */

	u8 wol_ena : 1;		/* software state of WoL */
	u32 wakeup_reason;	/* last wakeup reason */
	struct ice_hw_port_stats stats;
	struct ice_hw_port_stats stats_prev;
	struct ice_hw hw;
	u8 stat_prev_loaded:1;	/* has previous stats been loaded */
	u8 rdma_mode;
	u16 dcbx_cap;
	u32 tx_timeout_count;
	unsigned long tx_timeout_last_recovery;
	u32 tx_timeout_recovery_level;
	char int_name[ICE_INT_NAME_STR_LEN];
	char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
	struct auxiliary_device *adev;
	int aux_idx;
	u32 sw_int_count;
	/* count of tc_flower filters specific to channel (aka where filter
	 * action is "hw_tc <tc_num>")
	 */
	u16 num_dmac_chnl_fltrs;
	struct hlist_head tc_flower_fltr_list;

	u64 supported_rxdids;

	__le64 nvm_phy_type_lo;	/* NVM PHY type low */
	__le64 nvm_phy_type_hi;	/* NVM PHY type high */
	struct ice_link_default_override_tlv link_dflt_override;
	struct ice_lag *lag;	/* Link Aggregation information */

	struct ice_eswitch eswitch;
	struct ice_esw_br_port *br_port;

	struct xarray dyn_ports;
	struct xarray sf_nums;

#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
	struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
#define ICE_VF_AGG_NODE_ID_START 65
#define ICE_MAX_VF_AGG_NODES 32
	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
	struct ice_dplls dplls;
	struct device *hwmon_dev;
	struct ice_health health_reporters;

	u8 num_quanta_prof_used;
};
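
/* Illustrative note (not part of this header): the aggregator node IDs for
 * the two arrays above are derived from the array index plus the matching
 * *_AGG_NODE_ID_START base, which keeps the PF and VF ID ranges disjoint;
 * "i" below is a hypothetical array index:
 *
 *	u32 pf_agg_id = ICE_PF_AGG_NODE_ID_START + i;	(i < ICE_MAX_PF_AGG_NODES)
 *	u32 vf_agg_id = ICE_VF_AGG_NODE_ID_START + i;	(i < ICE_MAX_VF_AGG_NODES)
 */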

extern struct workqueue_struct *ice_lag_wq;

struct ice_netdev_priv {
	struct ice_vsi *vsi;
	struct ice_repr *repr;
	/* indirect block callbacks on registered higher level devices
	 * (e.g. tunnel devices)
	 *
	 * tc_indr_block_cb_priv_list is used to look up indirect callback
	 * private data
	 */
	struct list_head tc_indr_block_priv_list;
};

/**
 * ice_vector_ch_enabled
 * @qv: pointer to q_vector, can be NULL
 *
 * This function returns true if the vector is channel enabled, otherwise false
 */
static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
{
	return !!qv->ch; /* Enable it to run with TC */
}

/**
 * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
 * @pf: Board private structure
 *
 * Return true if this PF should respond to the Tx timestamp interrupt
 * indication in the miscellaneous OICR interrupt handler.
 */
static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
{
	return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
}

/**
 * ice_irq_dynamic_ena - Enable default interrupt generation settings
 * @hw: pointer to HW struct
 * @vsi: pointer to VSI struct, can be NULL
 * @q_vector: pointer to q_vector, can be NULL
 */
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
		    struct ice_q_vector *q_vector)
{
	u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
				((struct ice_pf *)hw->back)->oicr_irq.index;
	int itr = ICE_ITR_NONE;
	u32 val;

	/* clear the PBA here, as this function is meant to clean out all
	 * previous interrupts and enable the interrupt
	 */
	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      (itr << GLINT_DYN_CTL_ITR_INDX_S);
	if (vsi)
		if (test_bit(ICE_VSI_DOWN, vsi->state))
			return;
	wr32(hw, GLINT_DYN_CTL(vector), val);
}

/**
 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
 * @netdev: pointer to the netdev struct
 */
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->back;
}

static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
	return !!READ_ONCE(vsi->xdp_prog);
}

static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
	ring->flags |= ICE_TX_FLAGS_RING_XDP;
}

/**
 * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
 * attached and configured as zero-copy, NULL otherwise.
 */
static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
							u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!ice_is_xdp_ena_vsi(vsi))
		return NULL;

	return (pool && pool->dev) ? pool : NULL;
}

/**
 * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
 * @ring: Rx ring to use
 *
 * Sets XSK buff pool pointer on Rx ring.
 */
static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index;

	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}

/**
 * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Sets XSK buff pool pointer on XDP ring.
 *
 * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
 * queue id. Reason for doing so is that queue vectors might have assigned more
 * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
 * carries a pointer to one of these XDP rings for its own purposes, such as
 * handling XDP_TX action, therefore we can piggyback here on the
 * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
 */
static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
	struct ice_tx_ring *ring;

	ring = vsi->rx_rings[qid]->xdp_ring;
	if (!ring)
		return;

	WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}
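
/* Usage sketch (illustrative only, not part of this header): when the AF_XDP
 * zero-copy state of a queue may have changed, both helpers above are
 * typically refreshed for the same queue id so the Rx ring and its paired
 * XDP ring agree on the pool; "qid" is a hypothetical queue index:
 *
 *	ice_rx_xsk_pool(vsi->rx_rings[qid]);
 *	ice_tx_xsk_pool(vsi, qid);
 */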

/**
 * ice_get_main_vsi - Get the PF VSI
 * @pf: PF instance
 *
 * returns pf->vsi[0], which by definition is the PF VSI
 */
static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
{
	if (pf->vsi)
		return pf->vsi[0];

	return NULL;
}

/**
 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
 * @np: private netdev structure
 */
static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
{
	/* In case of port representor return source port VSI. */
	if (np->repr)
		return np->repr->src_vsi;
	else
		return np->vsi;
}

/**
 * ice_get_ctrl_vsi - Get the control VSI
 * @pf: PF instance
 */
static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
{
	/* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
	if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
		return NULL;

	return pf->vsi[pf->ctrl_vsi_idx];
}

/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
			return pf->vsi[i];
	return NULL;
}

/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise.
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
	return pf->eswitch.is_running;
}

#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
#define ICE_FD_STAT_PF_IDX(base_idx) \
			((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
#define ICE_FD_STAT_CH 1
#define ICE_FD_CH_STAT_IDX(base_idx) \
			(ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
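
/* Illustrative note (not part of this header): the macros above carve the
 * Flow Director statistics counters into per-PF blocks of
 * ICE_FD_STAT_CTR_BLOCK_COUNT entries, so for a PF with base_idx 2 they
 * expand to:
 *
 *	ICE_FD_SB_STAT_IDX(2) == 2 * 256     == 512
 *	ICE_FD_CH_STAT_IDX(2) == 2 * 256 + 1 == 513
 */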

/**
 * ice_is_adq_active - any active ADQs
 * @pf: pointer to PF
 *
 * This function returns true if there are any ADQs configured (which is
 * determined by looking at VSI type (which should be VSI_PF), numtc, and
 * TC_MQPRIO flag), otherwise returns false
 */
static inline bool ice_is_adq_active(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return false;

	/* is ADQ configured */
	if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		return true;

	return false;
}

void ice_debugfs_fwlog_init(struct ice_pf *pf);
void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);

bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
void ice_set_ethtool_sf_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
			     struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);

enum ice_xdp_cfg {
	ICE_XDP_CFG_FULL,	/* Fully apply new config in .ndo_bpf() */
	ICE_XDP_CFG_PART,	/* Save/use part of config in VSI rebuild */
};

int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
			  enum ice_xdp_cfg cfg_type);
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun);
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs);
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);

enum ice_aq_task_state {
	ICE_AQ_TASK_NOT_PREPARED,
	ICE_AQ_TASK_WAITING,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;
	struct ice_rq_event_info event;
	enum ice_aq_task_state state;
	u16 opcode;
};

void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode);
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout);
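
/* Usage sketch (illustrative only, not part of this header): a caller that
 * needs to synchronize with a firmware AdminQ event registers the expected
 * opcode before issuing the triggering command and then waits on the task;
 * "expected_opcode" is a hypothetical placeholder:
 *
 *	struct ice_aq_task task;
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, expected_opcode);
 *	(send the AdminQ command that produces the event)
 *	err = ice_aq_wait_for_event(pf, &task, HZ);
 *	if (err)
 *		(the wait timed out or was canceled)
 */
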
int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
int ice_change_mtu(struct net_device *netdev, int new_mtu);
void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
void ice_set_netdev_features(struct net_device *netdev);
int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
void ice_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *stats);

/**
 * ice_set_rdma_cap - enable RDMA support
 * @pf: PF struct
 */
static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
	if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
		set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	}
}

/**
 * ice_clear_rdma_cap - disable RDMA support
 * @pf: PF struct
 */
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
	/* defer unplug to service task to avoid RTNL lock and
	 * clear PLUG bit so that pending plugs don't interfere
	 */
	clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}

extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */