1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* Copyright (c) 2018, Intel Corporation. */ 3 4 #ifndef _ICE_H_ 5 #define _ICE_H_ 6 7 #include <linux/types.h> 8 #include <linux/errno.h> 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/firmware.h> 12 #include <linux/netdevice.h> 13 #include <linux/compiler.h> 14 #include <linux/etherdevice.h> 15 #include <linux/skbuff.h> 16 #include <linux/cpumask.h> 17 #include <linux/rtnetlink.h> 18 #include <linux/if_vlan.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/pci.h> 21 #include <linux/workqueue.h> 22 #include <linux/wait.h> 23 #include <linux/interrupt.h> 24 #include <linux/ethtool.h> 25 #include <linux/timer.h> 26 #include <linux/delay.h> 27 #include <linux/bitmap.h> 28 #include <linux/log2.h> 29 #include <linux/ip.h> 30 #include <linux/sctp.h> 31 #include <linux/ipv6.h> 32 #include <linux/pkt_sched.h> 33 #include <linux/if_bridge.h> 34 #include <linux/ctype.h> 35 #include <linux/linkmode.h> 36 #include <linux/bpf.h> 37 #include <linux/btf.h> 38 #include <linux/auxiliary_bus.h> 39 #include <linux/avf/virtchnl.h> 40 #include <linux/cpu_rmap.h> 41 #include <linux/dim.h> 42 #include <linux/gnss.h> 43 #include <net/pkt_cls.h> 44 #include <net/pkt_sched.h> 45 #include <net/tc_act/tc_mirred.h> 46 #include <net/tc_act/tc_gact.h> 47 #include <net/ip.h> 48 #include <net/devlink.h> 49 #include <net/ipv6.h> 50 #include <net/xdp_sock.h> 51 #include <net/xdp_sock_drv.h> 52 #include <net/geneve.h> 53 #include <net/gre.h> 54 #include <net/udp_tunnel.h> 55 #include <net/vxlan.h> 56 #include <net/gtp.h> 57 #include <linux/ppp_defs.h> 58 #include "ice_devids.h" 59 #include "ice_type.h" 60 #include "ice_txrx.h" 61 #include "ice_dcb.h" 62 #include "ice_switch.h" 63 #include "ice_common.h" 64 #include "ice_flow.h" 65 #include "ice_sched.h" 66 #include "ice_idc_int.h" 67 #include "ice_sriov.h" 68 #include "ice_vf_mbx.h" 69 #include "ice_ptp.h" 70 #include "ice_fdir.h" 71 #include "ice_xsk.h" 72 #include "ice_arfs.h" 73 #include "ice_repr.h" 74 #include "ice_eswitch.h" 75 #include "ice_lag.h" 76 #include "ice_vsi_vlan_ops.h" 77 #include "ice_gnss.h" 78 #include "ice_irq.h" 79 #include "ice_dpll.h" 80 81 #define ICE_BAR0 0 82 #define ICE_REQ_DESC_MULTIPLE 32 83 #define ICE_MIN_NUM_DESC 64 84 #define ICE_MAX_NUM_DESC 8160 85 #define ICE_DFLT_MIN_RX_DESC 512 86 #define ICE_DFLT_NUM_TX_DESC 256 87 #define ICE_DFLT_NUM_RX_DESC 2048 88 89 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) 90 #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) 91 #define ICE_AQ_LEN 192 92 #define ICE_MBXSQ_LEN 64 93 #define ICE_SBQ_LEN 64 94 #define ICE_MIN_LAN_TXRX_MSIX 1 95 #define ICE_MIN_LAN_OICR_MSIX 1 96 #define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX) 97 #define ICE_FDIR_MSIX 2 98 #define ICE_RDMA_NUM_AEQ_MSIX 4 99 #define ICE_MIN_RDMA_MSIX 2 100 #define ICE_ESWITCH_MSIX 1 101 #define ICE_NO_VSI 0xffff 102 #define ICE_VSI_MAP_CONTIG 0 103 #define ICE_VSI_MAP_SCATTER 1 104 #define ICE_MAX_SCATTER_TXQS 16 105 #define ICE_MAX_SCATTER_RXQS 16 106 #define ICE_Q_WAIT_RETRY_LIMIT 10 107 #define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT) 108 #define ICE_MAX_LG_RSS_QS 256 109 #define ICE_INVAL_Q_INDEX 0xffff 110 111 #define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */ 112 113 #define ICE_CHNL_START_TC 1 114 115 #define ICE_MAX_RESET_WAIT 20 116 117 #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) 118 119 #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 120 121 #define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD) 122 123 #define ICE_MAX_TSO_SIZE 131072 124 125 #define ICE_UP_TABLE_TRANSLATE(val, i) \ 126 (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \ 127 ICE_AQ_VSI_UP_TABLE_UP##i##_M) 128 129 #define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i])) 130 #define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i])) 131 #define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i])) 132 #define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i])) 133 134 /* Minimum BW limit is 500 Kbps for any scheduler node */ 135 #define ICE_MIN_BW_LIMIT 500 136 /* User can specify BW in either Kbit/Mbit/Gbit and OS converts it in bytes. 137 * use it to convert user specified BW limit into Kbps 138 */ 139 #define ICE_BW_KBPS_DIVISOR 125 140 141 /* Default recipes have priority 4 and below, hence priority values between 5..7 142 * can be used as filter priority for advanced switch filter (advanced switch 143 * filters need new recipe to be created for specified extraction sequence 144 * because default recipe extraction sequence does not represent custom 145 * extraction) 146 */ 147 #define ICE_SWITCH_FLTR_PRIO_QUEUE 7 148 /* prio 6 is reserved for future use (e.g. switch filter with L3 fields + 149 * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as 150 * SYN/FIN/RST)) 151 */ 152 #define ICE_SWITCH_FLTR_PRIO_RSVD 6 153 #define ICE_SWITCH_FLTR_PRIO_VSI 5 154 #define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI 155 156 /* Macro for each VSI in a PF */ 157 #define ice_for_each_vsi(pf, i) \ 158 for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++) 159 160 /* Macros for each Tx/Xdp/Rx ring in a VSI */ 161 #define ice_for_each_txq(vsi, i) \ 162 for ((i) = 0; (i) < (vsi)->num_txq; (i)++) 163 164 #define ice_for_each_xdp_txq(vsi, i) \ 165 for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++) 166 167 #define ice_for_each_rxq(vsi, i) \ 168 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) 169 170 /* Macros for each allocated Tx/Rx ring whether used or not in a VSI */ 171 #define ice_for_each_alloc_txq(vsi, i) \ 172 for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++) 173 174 #define ice_for_each_alloc_rxq(vsi, i) \ 175 for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++) 176 177 #define ice_for_each_q_vector(vsi, i) \ 178 for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++) 179 180 #define ice_for_each_chnl_tc(i) \ 181 for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++) 182 183 #define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX) 184 185 #define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \ 186 ICE_PROMISC_UCAST_RX | \ 187 ICE_PROMISC_VLAN_TX | \ 188 ICE_PROMISC_VLAN_RX) 189 190 #define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX) 191 192 #define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \ 193 ICE_PROMISC_MCAST_RX | \ 194 ICE_PROMISC_VLAN_TX | \ 195 ICE_PROMISC_VLAN_RX) 196 197 #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) 198 199 #define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned) 200 201 enum ice_feature { 202 ICE_F_DSCP, 203 ICE_F_PHY_RCLK, 204 ICE_F_SMA_CTRL, 205 ICE_F_CGU, 206 ICE_F_GNSS, 207 ICE_F_ROCE_LAG, 208 ICE_F_SRIOV_LAG, 209 ICE_F_MAX 210 }; 211 212 DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key); 213 214 struct ice_channel { 215 struct list_head list; 216 u8 type; 217 u16 sw_id; 218 u16 base_q; 219 u16 num_rxq; 220 u16 num_txq; 221 u16 vsi_num; 222 u8 ena_tc; 223 struct ice_aqc_vsi_props info; 224 u64 max_tx_rate; 225 u64 min_tx_rate; 226 atomic_t num_sb_fltr; 227 struct ice_vsi *ch_vsi; 228 }; 229 230 struct ice_txq_meta { 231 u32 q_teid; /* Tx-scheduler element identifier */ 232 u16 q_id; /* Entry in VSI's txq_map bitmap */ 233 u16 q_handle; /* Relative index of Tx queue within TC */ 234 u16 vsi_idx; /* VSI index that Tx queue belongs to */ 235 u8 tc; /* TC number that Tx queue belongs to */ 236 }; 237 238 struct ice_tc_info { 239 u16 qoffset; 240 u16 qcount_tx; 241 u16 qcount_rx; 242 u8 netdev_tc; 243 }; 244 245 struct ice_tc_cfg { 246 u8 numtc; /* Total number of enabled TCs */ 247 u16 ena_tc; /* Tx map */ 248 struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS]; 249 }; 250 251 struct ice_qs_cfg { 252 struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */ 253 unsigned long *pf_map; 254 unsigned long pf_map_size; 255 unsigned int q_count; 256 unsigned int scatter_count; 257 u16 *vsi_map; 258 u16 vsi_map_offset; 259 u8 mapping_mode; 260 }; 261 262 struct ice_sw { 263 struct ice_pf *pf; 264 u16 sw_id; /* switch ID for this switch */ 265 u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */ 266 }; 267 268 enum ice_pf_state { 269 ICE_TESTING, 270 ICE_DOWN, 271 ICE_NEEDS_RESTART, 272 ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ 273 ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ 274 ICE_PFR_REQ, /* set by driver */ 275 ICE_CORER_REQ, /* set by driver */ 276 ICE_GLOBR_REQ, /* set by driver */ 277 ICE_CORER_RECV, /* set by OICR handler */ 278 ICE_GLOBR_RECV, /* set by OICR handler */ 279 ICE_EMPR_RECV, /* set by OICR handler */ 280 ICE_SUSPENDED, /* set on module remove path */ 281 ICE_RESET_FAILED, /* set by reset/rebuild */ 282 /* When checking for the PF to be in a nominal operating state, the 283 * bits that are grouped at the beginning of the list need to be 284 * checked. Bits occurring before ICE_STATE_NOMINAL_CHECK_BITS will 285 * be checked. If you need to add a bit into consideration for nominal 286 * operating state, it must be added before 287 * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position 288 * without appropriate consideration. 289 */ 290 ICE_STATE_NOMINAL_CHECK_BITS, 291 ICE_ADMINQ_EVENT_PENDING, 292 ICE_MAILBOXQ_EVENT_PENDING, 293 ICE_SIDEBANDQ_EVENT_PENDING, 294 ICE_MDD_EVENT_PENDING, 295 ICE_VFLR_EVENT_PENDING, 296 ICE_FLTR_OVERFLOW_PROMISC, 297 ICE_VF_DIS, 298 ICE_CFG_BUSY, 299 ICE_SERVICE_SCHED, 300 ICE_SERVICE_DIS, 301 ICE_FD_FLUSH_REQ, 302 ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ 303 ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ 304 ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ 305 ICE_LINK_DEFAULT_OVERRIDE_PENDING, 306 ICE_PHY_INIT_COMPLETE, 307 ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */ 308 ICE_AUX_ERR_PENDING, 309 ICE_STATE_NBITS /* must be last */ 310 }; 311 312 enum ice_vsi_state { 313 ICE_VSI_DOWN, 314 ICE_VSI_NEEDS_RESTART, 315 ICE_VSI_NETDEV_ALLOCD, 316 ICE_VSI_NETDEV_REGISTERED, 317 ICE_VSI_UMAC_FLTR_CHANGED, 318 ICE_VSI_MMAC_FLTR_CHANGED, 319 ICE_VSI_PROMISC_CHANGED, 320 ICE_VSI_STATE_NBITS /* must be last */ 321 }; 322 323 struct ice_vsi_stats { 324 struct ice_ring_stats **tx_ring_stats; /* Tx ring stats array */ 325 struct ice_ring_stats **rx_ring_stats; /* Rx ring stats array */ 326 }; 327 328 /* struct that defines a VSI, associated with a dev */ 329 struct ice_vsi { 330 struct net_device *netdev; 331 struct ice_sw *vsw; /* switch this VSI is on */ 332 struct ice_pf *back; /* back pointer to PF */ 333 struct ice_port_info *port_info; /* back pointer to port_info */ 334 struct ice_rx_ring **rx_rings; /* Rx ring array */ 335 struct ice_tx_ring **tx_rings; /* Tx ring array */ 336 struct ice_q_vector **q_vectors; /* q_vector array */ 337 338 irqreturn_t (*irq_handler)(int irq, void *data); 339 340 u64 tx_linearize; 341 DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS); 342 unsigned int current_netdev_flags; 343 u32 tx_restart; 344 u32 tx_busy; 345 u32 rx_buf_failed; 346 u32 rx_page_failed; 347 u16 num_q_vectors; 348 /* tell if only dynamic irq allocation is allowed */ 349 bool irq_dyn_alloc; 350 351 enum ice_vsi_type type; 352 u16 vsi_num; /* HW (absolute) index of this VSI */ 353 u16 idx; /* software index in pf->vsi[] */ 354 355 struct ice_vf *vf; /* VF associated with this VSI */ 356 357 u16 num_gfltr; 358 u16 num_bfltr; 359 360 /* RSS config */ 361 u16 rss_table_size; /* HW RSS table size */ 362 u16 rss_size; /* Allocated RSS queues */ 363 u8 *rss_hkey_user; /* User configured hash keys */ 364 u8 *rss_lut_user; /* User configured lookup table entries */ 365 u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */ 366 367 /* aRFS members only allocated for the PF VSI */ 368 #define ICE_MAX_ARFS_LIST 1024 369 #define ICE_ARFS_LST_MASK (ICE_MAX_ARFS_LIST - 1) 370 struct hlist_head *arfs_fltr_list; 371 struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs; 372 spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ 373 atomic_t *arfs_last_fltr_id; 374 375 u16 max_frame; 376 u16 rx_buf_len; 377 378 struct ice_aqc_vsi_props info; /* VSI properties */ 379 struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */ 380 381 /* VSI stats */ 382 struct rtnl_link_stats64 net_stats; 383 struct rtnl_link_stats64 net_stats_prev; 384 struct ice_eth_stats eth_stats; 385 struct ice_eth_stats eth_stats_prev; 386 387 struct list_head tmp_sync_list; /* MAC filters to be synced */ 388 struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ 389 390 u8 irqs_ready:1; 391 u8 current_isup:1; /* Sync 'link up' logging */ 392 u8 stat_offsets_loaded:1; 393 struct ice_vsi_vlan_ops inner_vlan_ops; 394 struct ice_vsi_vlan_ops outer_vlan_ops; 395 u16 num_vlan; 396 397 /* queue information */ 398 u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ 399 u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ 400 u16 *txq_map; /* index in pf->avail_txqs */ 401 u16 *rxq_map; /* index in pf->avail_rxqs */ 402 u16 alloc_txq; /* Allocated Tx queues */ 403 u16 num_txq; /* Used Tx queues */ 404 u16 alloc_rxq; /* Allocated Rx queues */ 405 u16 num_rxq; /* Used Rx queues */ 406 u16 req_txq; /* User requested Tx queues */ 407 u16 req_rxq; /* User requested Rx queues */ 408 u16 num_rx_desc; 409 u16 num_tx_desc; 410 u16 qset_handle[ICE_MAX_TRAFFIC_CLASS]; 411 struct ice_tc_cfg tc_cfg; 412 struct bpf_prog *xdp_prog; 413 struct ice_tx_ring **xdp_rings; /* XDP ring array */ 414 unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ 415 u16 num_xdp_txq; /* Used XDP queues */ 416 u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ 417 418 struct net_device **target_netdevs; 419 420 struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */ 421 422 /* Channel Specific Fields */ 423 struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC]; 424 u16 cnt_q_avail; 425 u16 next_base_q; /* next queue to be used for channel setup */ 426 struct list_head ch_list; 427 u16 num_chnl_rxq; 428 u16 num_chnl_txq; 429 u16 ch_rss_size; 430 u16 num_chnl_fltr; 431 /* store away rss size info before configuring ADQ channels so that, 432 * it can be used after tc-qdisc delete, to get back RSS setting as 433 * they were before 434 */ 435 u16 orig_rss_size; 436 /* this keeps tracks of all enabled TC with and without DCB 437 * and inclusive of ADQ, vsi->mqprio_opt keeps track of queue 438 * information 439 */ 440 u8 all_numtc; 441 u16 all_enatc; 442 443 /* store away TC info, to be used for rebuild logic */ 444 u8 old_numtc; 445 u16 old_ena_tc; 446 447 struct ice_channel *ch; 448 449 /* setup back reference, to which aggregator node this VSI 450 * corresponds to 451 */ 452 struct ice_agg_node *agg_node; 453 } ____cacheline_internodealigned_in_smp; 454 455 /* struct that defines an interrupt vector */ 456 struct ice_q_vector { 457 struct ice_vsi *vsi; 458 459 u16 v_idx; /* index in the vsi->q_vector array. */ 460 u16 reg_idx; 461 u8 num_ring_rx; /* total number of Rx rings in vector */ 462 u8 num_ring_tx; /* total number of Tx rings in vector */ 463 u8 wb_on_itr:1; /* if true, WB on ITR is enabled */ 464 /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this 465 * value to the device 466 */ 467 u8 intrl; 468 469 struct napi_struct napi; 470 471 struct ice_ring_container rx; 472 struct ice_ring_container tx; 473 474 cpumask_t affinity_mask; 475 struct irq_affinity_notify affinity_notify; 476 477 struct ice_channel *ch; 478 479 char name[ICE_INT_NAME_STR_LEN]; 480 481 u16 total_events; /* net_dim(): number of interrupts processed */ 482 struct msi_map irq; 483 } ____cacheline_internodealigned_in_smp; 484 485 enum ice_pf_flags { 486 ICE_FLAG_FLTR_SYNC, 487 ICE_FLAG_RDMA_ENA, 488 ICE_FLAG_RSS_ENA, 489 ICE_FLAG_SRIOV_ENA, 490 ICE_FLAG_SRIOV_CAPABLE, 491 ICE_FLAG_DCB_CAPABLE, 492 ICE_FLAG_DCB_ENA, 493 ICE_FLAG_FD_ENA, 494 ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */ 495 ICE_FLAG_PTP, /* PTP is enabled by software */ 496 ICE_FLAG_ADV_FEATURES, 497 ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */ 498 ICE_FLAG_CLS_FLOWER, 499 ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, 500 ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, 501 ICE_FLAG_NO_MEDIA, 502 ICE_FLAG_FW_LLDP_AGENT, 503 ICE_FLAG_MOD_POWER_UNSUPPORTED, 504 ICE_FLAG_PHY_FW_LOAD_FAILED, 505 ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ 506 ICE_FLAG_LEGACY_RX, 507 ICE_FLAG_VF_TRUE_PROMISC_ENA, 508 ICE_FLAG_MDD_AUTO_RESET_VF, 509 ICE_FLAG_VF_VLAN_PRUNING, 510 ICE_FLAG_LINK_LENIENT_MODE_ENA, 511 ICE_FLAG_PLUG_AUX_DEV, 512 ICE_FLAG_UNPLUG_AUX_DEV, 513 ICE_FLAG_MTU_CHANGED, 514 ICE_FLAG_GNSS, /* GNSS successfully initialized */ 515 ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */ 516 ICE_PF_FLAGS_NBITS /* must be last */ 517 }; 518 519 enum ice_misc_thread_tasks { 520 ICE_MISC_THREAD_EXTTS_EVENT, 521 ICE_MISC_THREAD_TX_TSTAMP, 522 ICE_MISC_THREAD_NBITS /* must be last */ 523 }; 524 525 struct ice_switchdev_info { 526 struct ice_vsi *control_vsi; 527 struct ice_vsi *uplink_vsi; 528 struct ice_esw_br_offloads *br_offloads; 529 bool is_running; 530 }; 531 532 struct ice_agg_node { 533 u32 agg_id; 534 #define ICE_MAX_VSIS_IN_AGG_NODE 64 535 u32 num_vsis; 536 u8 valid; 537 }; 538 539 struct ice_pf { 540 struct pci_dev *pdev; 541 542 struct devlink_region *nvm_region; 543 struct devlink_region *sram_region; 544 struct devlink_region *devcaps_region; 545 546 /* devlink port data */ 547 struct devlink_port devlink_port; 548 549 /* OS reserved IRQ details */ 550 struct msix_entry *msix_entries; 551 struct ice_irq_tracker irq_tracker; 552 /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the 553 * number of MSIX vectors needed for all SR-IOV VFs from the number of 554 * MSIX vectors allowed on this PF. 555 */ 556 u16 sriov_base_vector; 557 558 u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */ 559 560 struct ice_vsi **vsi; /* VSIs created by the driver */ 561 struct ice_vsi_stats **vsi_stats; 562 struct ice_sw *first_sw; /* first switch created by firmware */ 563 u16 eswitch_mode; /* current mode of eswitch */ 564 struct ice_vfs vfs; 565 DECLARE_BITMAP(features, ICE_F_MAX); 566 DECLARE_BITMAP(state, ICE_STATE_NBITS); 567 DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); 568 DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS); 569 unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ 570 unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */ 571 unsigned long serv_tmr_period; 572 unsigned long serv_tmr_prev; 573 struct timer_list serv_tmr; 574 struct work_struct serv_task; 575 struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */ 576 struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ 577 struct mutex tc_mutex; /* lock to protect TC changes */ 578 struct mutex adev_mutex; /* lock to protect aux device access */ 579 struct mutex lag_mutex; /* protect ice_lag struct in PF */ 580 u32 msg_enable; 581 struct ice_ptp ptp; 582 struct gnss_serial *gnss_serial; 583 struct gnss_device *gnss_dev; 584 u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ 585 u16 rdma_base_vector; 586 587 /* spinlock to protect the AdminQ wait list */ 588 spinlock_t aq_wait_lock; 589 struct hlist_head aq_wait_list; 590 wait_queue_head_t aq_wait_queue; 591 bool fw_emp_reset_disabled; 592 593 wait_queue_head_t reset_wait_queue; 594 595 u32 hw_csum_rx_error; 596 u32 oicr_err_reg; 597 struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */ 598 u16 max_pf_txqs; /* Total Tx queues PF wide */ 599 u16 max_pf_rxqs; /* Total Rx queues PF wide */ 600 u16 num_lan_msix; /* Total MSIX vectors for base driver */ 601 u16 num_lan_tx; /* num LAN Tx queues setup */ 602 u16 num_lan_rx; /* num LAN Rx queues setup */ 603 u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */ 604 u16 num_alloc_vsi; 605 u16 corer_count; /* Core reset count */ 606 u16 globr_count; /* Global reset count */ 607 u16 empr_count; /* EMP reset count */ 608 u16 pfr_count; /* PF reset count */ 609 610 u8 wol_ena : 1; /* software state of WoL */ 611 u32 wakeup_reason; /* last wakeup reason */ 612 struct ice_hw_port_stats stats; 613 struct ice_hw_port_stats stats_prev; 614 struct ice_hw hw; 615 u8 stat_prev_loaded:1; /* has previous stats been loaded */ 616 u8 rdma_mode; 617 u16 dcbx_cap; 618 u32 tx_timeout_count; 619 unsigned long tx_timeout_last_recovery; 620 u32 tx_timeout_recovery_level; 621 char int_name[ICE_INT_NAME_STR_LEN]; 622 struct auxiliary_device *adev; 623 int aux_idx; 624 u32 sw_int_count; 625 /* count of tc_flower filters specific to channel (aka where filter 626 * action is "hw_tc <tc_num>") 627 */ 628 u16 num_dmac_chnl_fltrs; 629 struct hlist_head tc_flower_fltr_list; 630 631 u64 supported_rxdids; 632 633 __le64 nvm_phy_type_lo; /* NVM PHY type low */ 634 __le64 nvm_phy_type_hi; /* NVM PHY type high */ 635 struct ice_link_default_override_tlv link_dflt_override; 636 struct ice_lag *lag; /* Link Aggregation information */ 637 638 struct ice_switchdev_info switchdev; 639 struct ice_esw_br_port *br_port; 640 641 #define ICE_INVALID_AGG_NODE_ID 0 642 #define ICE_PF_AGG_NODE_ID_START 1 643 #define ICE_MAX_PF_AGG_NODES 32 644 struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES]; 645 #define ICE_VF_AGG_NODE_ID_START 65 646 #define ICE_MAX_VF_AGG_NODES 32 647 struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; 648 struct ice_dplls dplls; 649 }; 650 651 extern struct workqueue_struct *ice_lag_wq; 652 653 struct ice_netdev_priv { 654 struct ice_vsi *vsi; 655 struct ice_repr *repr; 656 /* indirect block callbacks on registered higher level devices 657 * (e.g. tunnel devices) 658 * 659 * tc_indr_block_cb_priv_list is used to look up indirect callback 660 * private data 661 */ 662 struct list_head tc_indr_block_priv_list; 663 }; 664 665 /** 666 * ice_vector_ch_enabled 667 * @qv: pointer to q_vector, can be NULL 668 * 669 * This function returns true if vector is channel enabled otherwise false 670 */ 671 static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv) 672 { 673 return !!qv->ch; /* Enable it to run with TC */ 674 } 675 676 /** 677 * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt 678 * @pf: Board private structure 679 * 680 * Return true if this PF should respond to the Tx timestamp interrupt 681 * indication in the miscellaneous OICR interrupt handler. 682 */ 683 static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf) 684 { 685 return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE; 686 } 687 688 /** 689 * ice_irq_dynamic_ena - Enable default interrupt generation settings 690 * @hw: pointer to HW struct 691 * @vsi: pointer to VSI struct, can be NULL 692 * @q_vector: pointer to q_vector, can be NULL 693 */ 694 static inline void 695 ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, 696 struct ice_q_vector *q_vector) 697 { 698 u32 vector = (vsi && q_vector) ? q_vector->reg_idx : 699 ((struct ice_pf *)hw->back)->oicr_irq.index; 700 int itr = ICE_ITR_NONE; 701 u32 val; 702 703 /* clear the PBA here, as this function is meant to clean out all 704 * previous interrupts and enable the interrupt 705 */ 706 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | 707 (itr << GLINT_DYN_CTL_ITR_INDX_S); 708 if (vsi) 709 if (test_bit(ICE_VSI_DOWN, vsi->state)) 710 return; 711 wr32(hw, GLINT_DYN_CTL(vector), val); 712 } 713 714 /** 715 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev 716 * @netdev: pointer to the netdev struct 717 */ 718 static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) 719 { 720 struct ice_netdev_priv *np = netdev_priv(netdev); 721 722 return np->vsi->back; 723 } 724 725 static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) 726 { 727 return !!READ_ONCE(vsi->xdp_prog); 728 } 729 730 static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) 731 { 732 ring->flags |= ICE_TX_FLAGS_RING_XDP; 733 } 734 735 /** 736 * ice_xsk_pool - get XSK buffer pool bound to a ring 737 * @ring: Rx ring to use 738 * 739 * Returns a pointer to xsk_buff_pool structure if there is a buffer pool 740 * present, NULL otherwise. 741 */ 742 static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring) 743 { 744 struct ice_vsi *vsi = ring->vsi; 745 u16 qid = ring->q_index; 746 747 if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) 748 return NULL; 749 750 return xsk_get_pool_from_qid(vsi->netdev, qid); 751 } 752 753 /** 754 * ice_tx_xsk_pool - assign XSK buff pool to XDP ring 755 * @vsi: pointer to VSI 756 * @qid: index of a queue to look at XSK buff pool presence 757 * 758 * Sets XSK buff pool pointer on XDP ring. 759 * 760 * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided 761 * queue id. Reason for doing so is that queue vectors might have assigned more 762 * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring 763 * carries a pointer to one of these XDP rings for its own purposes, such as 764 * handling XDP_TX action, therefore we can piggyback here on the 765 * rx_ring->xdp_ring assignment that was done during XDP rings initialization. 766 */ 767 static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid) 768 { 769 struct ice_tx_ring *ring; 770 771 ring = vsi->rx_rings[qid]->xdp_ring; 772 if (!ring) 773 return; 774 775 if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) { 776 ring->xsk_pool = NULL; 777 return; 778 } 779 780 ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid); 781 } 782 783 /** 784 * ice_get_main_vsi - Get the PF VSI 785 * @pf: PF instance 786 * 787 * returns pf->vsi[0], which by definition is the PF VSI 788 */ 789 static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) 790 { 791 if (pf->vsi) 792 return pf->vsi[0]; 793 794 return NULL; 795 } 796 797 /** 798 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv. 799 * @np: private netdev structure 800 */ 801 static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np) 802 { 803 /* In case of port representor return source port VSI. */ 804 if (np->repr) 805 return np->repr->src_vsi; 806 else 807 return np->vsi; 808 } 809 810 /** 811 * ice_get_ctrl_vsi - Get the control VSI 812 * @pf: PF instance 813 */ 814 static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) 815 { 816 /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */ 817 if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI) 818 return NULL; 819 820 return pf->vsi[pf->ctrl_vsi_idx]; 821 } 822 823 /** 824 * ice_find_vsi - Find the VSI from VSI ID 825 * @pf: The PF pointer to search in 826 * @vsi_num: The VSI ID to search for 827 */ 828 static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) 829 { 830 int i; 831 832 ice_for_each_vsi(pf, i) 833 if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) 834 return pf->vsi[i]; 835 return NULL; 836 } 837 838 /** 839 * ice_is_switchdev_running - check if switchdev is configured 840 * @pf: pointer to PF structure 841 * 842 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV 843 * and switchdev is configured, false otherwise. 844 */ 845 static inline bool ice_is_switchdev_running(struct ice_pf *pf) 846 { 847 return pf->switchdev.is_running; 848 } 849 850 #define ICE_FD_STAT_CTR_BLOCK_COUNT 256 851 #define ICE_FD_STAT_PF_IDX(base_idx) \ 852 ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT) 853 #define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx) 854 #define ICE_FD_STAT_CH 1 855 #define ICE_FD_CH_STAT_IDX(base_idx) \ 856 (ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH) 857 858 /** 859 * ice_is_adq_active - any active ADQs 860 * @pf: pointer to PF 861 * 862 * This function returns true if there are any ADQs configured (which is 863 * determined by looking at VSI type (which should be VSI_PF), numtc, and 864 * TC_MQPRIO flag) otherwise return false 865 */ 866 static inline bool ice_is_adq_active(struct ice_pf *pf) 867 { 868 struct ice_vsi *vsi; 869 870 vsi = ice_get_main_vsi(pf); 871 if (!vsi) 872 return false; 873 874 /* is ADQ configured */ 875 if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC && 876 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) 877 return true; 878 879 return false; 880 } 881 882 bool netif_is_ice(const struct net_device *dev); 883 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); 884 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); 885 int ice_vsi_open_ctrl(struct ice_vsi *vsi); 886 int ice_vsi_open(struct ice_vsi *vsi); 887 void ice_set_ethtool_ops(struct net_device *netdev); 888 void ice_set_ethtool_repr_ops(struct net_device *netdev); 889 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); 890 u16 ice_get_avail_txq_count(struct ice_pf *pf); 891 u16 ice_get_avail_rxq_count(struct ice_pf *pf); 892 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked); 893 void ice_update_vsi_stats(struct ice_vsi *vsi); 894 void ice_update_pf_stats(struct ice_pf *pf); 895 void 896 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, 897 struct ice_q_stats stats, u64 *pkts, u64 *bytes); 898 int ice_up(struct ice_vsi *vsi); 899 int ice_down(struct ice_vsi *vsi); 900 int ice_down_up(struct ice_vsi *vsi); 901 int ice_vsi_cfg_lan(struct ice_vsi *vsi); 902 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); 903 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); 904 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); 905 int ice_destroy_xdp_rings(struct ice_vsi *vsi); 906 int 907 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, 908 u32 flags); 909 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); 910 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); 911 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed); 912 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); 913 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); 914 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); 915 void ice_print_link_msg(struct ice_vsi *vsi, bool isup); 916 int ice_plug_aux_dev(struct ice_pf *pf); 917 void ice_unplug_aux_dev(struct ice_pf *pf); 918 int ice_init_rdma(struct ice_pf *pf); 919 void ice_deinit_rdma(struct ice_pf *pf); 920 const char *ice_aq_str(enum ice_aq_err aq_err); 921 bool ice_is_wol_supported(struct ice_hw *hw); 922 void ice_fdir_del_all_fltrs(struct ice_vsi *vsi); 923 int 924 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, 925 bool is_tun); 926 void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena); 927 int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); 928 int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); 929 int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd); 930 int 931 ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd, 932 u32 *rule_locs); 933 void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx); 934 void ice_fdir_release_flows(struct ice_hw *hw); 935 void ice_fdir_replay_flows(struct ice_hw *hw); 936 void ice_fdir_replay_fltrs(struct ice_pf *pf); 937 int ice_fdir_create_dflt_rules(struct ice_pf *pf); 938 939 enum ice_aq_task_state { 940 ICE_AQ_TASK_NOT_PREPARED, 941 ICE_AQ_TASK_WAITING, 942 ICE_AQ_TASK_COMPLETE, 943 ICE_AQ_TASK_CANCELED, 944 }; 945 946 struct ice_aq_task { 947 struct hlist_node entry; 948 struct ice_rq_event_info event; 949 enum ice_aq_task_state state; 950 u16 opcode; 951 }; 952 953 void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task, 954 u16 opcode); 955 int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task, 956 unsigned long timeout); 957 int ice_open(struct net_device *netdev); 958 int ice_open_internal(struct net_device *netdev); 959 int ice_stop(struct net_device *netdev); 960 void ice_service_task_schedule(struct ice_pf *pf); 961 int ice_load(struct ice_pf *pf); 962 void ice_unload(struct ice_pf *pf); 963 964 /** 965 * ice_set_rdma_cap - enable RDMA support 966 * @pf: PF struct 967 */ 968 static inline void ice_set_rdma_cap(struct ice_pf *pf) 969 { 970 if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { 971 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); 972 set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); 973 } 974 } 975 976 /** 977 * ice_clear_rdma_cap - disable RDMA support 978 * @pf: PF struct 979 */ 980 static inline void ice_clear_rdma_cap(struct ice_pf *pf) 981 { 982 /* defer unplug to service task to avoid RTNL lock and 983 * clear PLUG bit so that pending plugs don't interfere 984 */ 985 clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); 986 set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags); 987 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); 988 } 989 #endif /* _ICE_H_ */ 990