/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_H_
#define _IDPF_H_

/* Forward declarations */
struct idpf_adapter;
struct idpf_vport;
struct idpf_vport_max_q;

#include <net/pkt_sched.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>

#include <linux/net/intel/iidc_rdma.h>
#include <linux/net/intel/iidc_rdma_idpf.h>

#include "virtchnl2.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"

#define GETMAXVAL(num_bits)	GENMASK((num_bits) - 1, 0)

#define IDPF_NO_FREE_SLOT	0xffff

/* Default Mailbox settings */
#define IDPF_NUM_FILTERS_PER_MSG	20
#define IDPF_NUM_DFLT_MBX_Q	2	/* includes both TX and RX */
#define IDPF_DFLT_MBX_Q_LEN	64
#define IDPF_DFLT_MBX_ID	-1
/* maximum number of times to try before resetting mailbox */
#define IDPF_MB_MAX_ERR		20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz)	\
	((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
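/*
 * Illustrative arithmetic only: the space left in a mailbox buffer after a
 * message's fixed header (struct_sz bytes) is divided by the per-entry chunk
 * size. E.g. the number of queue register chunks that fit in one
 * virtchnl2_add_queues message would be roughly
 * IDPF_NUM_CHUNKS_PER_MSG(sizeof(struct virtchnl2_add_queues),
 *			    sizeof(struct virtchnl2_queue_reg_chunk)).
 */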
#define IDPF_WAIT_FOR_MARKER_TIMEO	500
#define IDPF_MAX_WAIT			500

/* available message levels */
#define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IDPF_DIM_PROFILE_SLOTS	5

#define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2
#define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0

/**
 * struct idpf_mac_filter - MAC address filter entry
 * @list: list member field
 * @macaddr: MAC address
 * @remove: filter should be removed (virtchnl)
 * @add: filter should be added (virtchnl)
 */
struct idpf_mac_filter {
	struct list_head list;
	u8 macaddr[ETH_ALEN];
	bool remove;
	bool add;
};

/**
 * enum idpf_state - State machine to handle bring up
 * @__IDPF_VER_CHECK: Negotiate virtchnl version
 * @__IDPF_GET_CAPS: Negotiate capabilities
 * @__IDPF_INIT_SW: Init based on given capabilities
 * @__IDPF_STATE_LAST: Must be last, used to determine size
 */
enum idpf_state {
	__IDPF_VER_CHECK,
	__IDPF_GET_CAPS,
	__IDPF_INIT_SW,
	__IDPF_STATE_LAST,
};

/**
 * enum idpf_flags - Hard reset causes.
 * @IDPF_HR_FUNC_RESET: Hard reset when TxRx timeout
 * @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW
 * @IDPF_HR_RESET_IN_PROG: Reset in progress
 * @IDPF_REMOVE_IN_PROG: Driver remove in progress
 * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
 * @IDPF_VC_CORE_INIT: virtchnl core has been init
 * @IDPF_FLAGS_NBITS: Must be last
 */
enum idpf_flags {
	IDPF_HR_FUNC_RESET,
	IDPF_HR_DRV_LOAD,
	IDPF_HR_RESET_IN_PROG,
	IDPF_REMOVE_IN_PROG,
	IDPF_MB_INTR_MODE,
	IDPF_VC_CORE_INIT,
	IDPF_FLAGS_NBITS,
};

/**
 * enum idpf_cap_field - Offsets into capabilities struct for specific caps
 * @IDPF_BASE_CAPS: generic base capabilities
 * @IDPF_CSUM_CAPS: checksum offload capabilities
 * @IDPF_SEG_CAPS: segmentation offload capabilities
 * @IDPF_RSS_CAPS: RSS offload capabilities
 * @IDPF_HSPLIT_CAPS: Header split capabilities
 * @IDPF_RSC_CAPS: RSC offload capabilities
 * @IDPF_OTHER_CAPS: miscellaneous offloads
 *
 * Used when checking for a specific capability flag. Since different
 * capability sets are not mutually exclusive numerically, the caller must
 * specify which type of capability they are checking for.
 */
enum idpf_cap_field {
	IDPF_BASE_CAPS		= -1,
	IDPF_CSUM_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   csum_caps),
	IDPF_SEG_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   seg_caps),
	IDPF_RSS_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   rss_caps),
	IDPF_HSPLIT_CAPS	= offsetof(struct virtchnl2_get_capabilities,
					   hsplit_caps),
	IDPF_RSC_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   rsc_caps),
	IDPF_OTHER_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   other_caps),
};

/**
 * enum idpf_vport_state - Current vport state
 * @__IDPF_VPORT_DOWN: Vport is down
 * @__IDPF_VPORT_UP: Vport is up
 * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
 */
enum idpf_vport_state {
	__IDPF_VPORT_DOWN,
	__IDPF_VPORT_UP,
	__IDPF_VPORT_STATE_LAST,
};

/**
 * struct idpf_netdev_priv - Struct to store vport back pointer
 * @adapter: Adapter back pointer
 * @vport: Vport back pointer
 * @vport_id: Vport identifier
 * @link_speed_mbps: Link speed in mbps
 * @vport_idx: Relative vport index
 * @max_tx_hdr_size: Max header length hardware can support
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @state: See enum idpf_vport_state
 * @netstats: Packet and byte stats
 * @stats_lock: Lock to protect stats update
 */
struct idpf_netdev_priv {
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	u32 vport_id;
	u32 link_speed_mbps;
	u16 vport_idx;
	u16 max_tx_hdr_size;
	u16 tx_max_bufs;
	enum idpf_vport_state state;
	struct rtnl_link_stats64 netstats;
	spinlock_t stats_lock;
};

/**
 * struct idpf_reset_reg - Reset register offsets/masks
 * @rstat: Reset status register
 * @rstat_m: Reset status mask
 */
struct idpf_reset_reg {
	void __iomem *rstat;
	u32 rstat_m;
};

/**
 * struct idpf_vport_max_q - Queue limits
 * @max_rxq: Maximum number of RX queues supported
 * @max_txq: Maximum number of TX queues supported
 * @max_bufq: In splitq, maximum number of buffer queues supported
 * @max_complq: In splitq, maximum number of completion queues supported
 */
struct idpf_vport_max_q {
	u16 max_rxq;
	u16 max_txq;
	u16 max_bufq;
	u16 max_complq;
};

/**
 * struct idpf_reg_ops - Device specific register operation function pointers
 * @ctlq_reg_init: Mailbox control queue register initialization
 * @intr_reg_init: Traffic interrupt register initialization
 * @mb_intr_reg_init: Mailbox interrupt register initialization
 * @reset_reg_init: Reset register initialization
 * @trigger_reset: Trigger a reset to occur
 * @ptp_reg_init: PTP register initialization
 */
struct idpf_reg_ops {
	void (*ctlq_reg_init)(struct idpf_adapter *adapter,
			      struct idpf_ctlq_create_info *cq);
	int (*intr_reg_init)(struct idpf_vport *vport);
	void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
	void (*reset_reg_init)(struct idpf_adapter *adapter);
	void (*trigger_reset)(struct idpf_adapter *adapter,
			      enum idpf_flags trig_cause);
	void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};
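/*
 * A minimal sketch of how a device-specific init path might populate these
 * ops. The pf_*() callback names below are hypothetical placeholders, not
 * declarations from this driver:
 *
 *	static void pf_reg_ops_init(struct idpf_adapter *adapter)
 *	{
 *		adapter->dev_ops.reg_ops.ctlq_reg_init = pf_ctlq_reg_init;
 *		adapter->dev_ops.reg_ops.intr_reg_init = pf_intr_reg_init;
 *		adapter->dev_ops.reg_ops.mb_intr_reg_init = pf_mb_intr_reg_init;
 *		adapter->dev_ops.reg_ops.reset_reg_init = pf_reset_reg_init;
 *		adapter->dev_ops.reg_ops.trigger_reset = pf_trigger_reset;
 *	}
 */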
#define IDPF_MMIO_REG_NUM_STATIC	2
#define IDPF_PF_MBX_REGION_SZ		4096
#define IDPF_PF_RSTAT_REGION_SZ		2048
#define IDPF_VF_MBX_REGION_SZ		10240
#define IDPF_VF_RSTAT_REGION_SZ		2048

/**
 * struct idpf_dev_ops - Device specific operations
 * @reg_ops: Register operations
 * @idc_init: IDC initialization
 * @static_reg_info: array of mailbox and rstat register info
 */
struct idpf_dev_ops {
	struct idpf_reg_ops reg_ops;

	int (*idc_init)(struct idpf_adapter *adapter);

	/* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
	struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
};

/**
 * enum idpf_vport_reset_cause - Vport soft reset causes
 * @IDPF_SR_Q_CHANGE: Soft reset queue change
 * @IDPF_SR_Q_DESC_CHANGE: Soft reset descriptor change
 * @IDPF_SR_MTU_CHANGE: Soft reset MTU change
 * @IDPF_SR_RSC_CHANGE: Soft reset RSC change
 */
enum idpf_vport_reset_cause {
	IDPF_SR_Q_CHANGE,
	IDPF_SR_Q_DESC_CHANGE,
	IDPF_SR_MTU_CHANGE,
	IDPF_SR_RSC_CHANGE,
};

/**
 * enum idpf_vport_flags - Vport flags
 * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
 * @IDPF_VPORT_FLAGS_NBITS: Must be last
 */
enum idpf_vport_flags {
	IDPF_VPORT_DEL_QUEUES,
	IDPF_VPORT_FLAGS_NBITS,
};

/**
 * struct idpf_port_stats - Per port offload and error stats
 * @stats_sync: Used to protect updates of the stats below
 * @rx_hw_csum_err: Packets received with a bad checksum
 * @rx_hsplit: Packets received with header split
 * @rx_hsplit_hbo: Header split packets with header buffer overflow
 * @rx_bad_descs: Bad RX descriptors seen
 * @tx_linearize: Packets linearized before transmit
 * @tx_busy: Times transmit was stopped because the queue was busy
 * @tx_drops: Packets dropped on transmit
 * @tx_dma_map_errs: DMA mapping errors on transmit
 * @vport_stats: Vport stats reported by the device
 */
struct idpf_port_stats {
	struct u64_stats_sync stats_sync;
	u64_stats_t rx_hw_csum_err;
	u64_stats_t rx_hsplit;
	u64_stats_t rx_hsplit_hbo;
	u64_stats_t rx_bad_descs;
	u64_stats_t tx_linearize;
	u64_stats_t tx_busy;
	u64_stats_t tx_drops;
	u64_stats_t tx_dma_map_errs;
	struct virtchnl2_vport_stats vport_stats;
};
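/*
 * Illustrative update/read pattern for the u64_stats counters above; this
 * mirrors standard <linux/u64_stats_sync.h> usage, not driver-specific code:
 *
 *	// writer, e.g. on a TX completion path
 *	u64_stats_update_begin(&port_stats->stats_sync);
 *	u64_stats_inc(&port_stats->tx_drops);
 *	u64_stats_update_end(&port_stats->stats_sync);
 *
 *	// reader, retrying if a writer raced with us
 *	unsigned int start;
 *	u64 drops;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&port_stats->stats_sync);
 *		drops = u64_stats_read(&port_stats->tx_drops);
 *	} while (u64_stats_fetch_retry(&port_stats->stats_sync, start));
 */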
/**
 * struct idpf_fsteer_fltr - Flow steering filter entry
 * @list: list member field
 * @loc: location of the filter in the ethtool filter table
 * @q_index: destination RX queue index
 */
struct idpf_fsteer_fltr {
	struct list_head list;
	u32 loc;
	u32 q_index;
};

/**
 * struct idpf_vport - Handle for netdevices and queue resources
 * @num_txq: Number of allocated TX queues
 * @num_complq: Number of allocated completion queues
 * @txq_desc_count: TX queue descriptor count
 * @complq_desc_count: Completion queue descriptor count
 * @compln_clean_budget: Work budget for completion clean
 * @num_txq_grp: Number of TX queue groups
 * @txq_grps: Array of TX queue groups
 * @txq_model: Split queue or single queue queuing model
 * @txqs: Used only in hotpath to get to the right queue very fast
 * @crc_enable: Enable CRC insertion offload
 * @xdpsq_share: whether XDPSQ sharing is enabled
 * @num_xdp_txq: number of XDPSQs
 * @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
 * @xdp_prog: installed XDP program
 * @num_rxq: Number of allocated RX queues
 * @num_bufq: Number of allocated buffer queues
 * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
 *		    to complete all buffer descriptors for all buffer queues in
 *		    the worst case.
 * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
 * @bufq_desc_count: Buffer queue descriptor count
 * @num_rxq_grp: Number of RX queue groups
 * @rxq_grps: Array of RX queue groups. Number of groups * number of RX queues
 *	      per group will yield total number of RX queues.
 * @rxq_model: Splitq queue or single queue queuing model
 * @rx_ptype_lkup: Lookup table for ptypes on RX
 * @vdev_info: IDC vport device info pointer
 * @adapter: back pointer to associated adapter
 * @netdev: Associated net_device. Each vport should have one and only one
 *	    associated netdev.
 * @flags: See enum idpf_vport_flags
 * @vport_type: Default SRIOV, SIOV, etc.
 * @vport_id: Device given vport identifier
 * @idx: Software index in adapter vports struct
 * @default_vport: Use this vport if one isn't specified
 * @base_rxd: True if the driver should use base descriptors instead of flex
 * @num_q_vectors: Number of IRQ vectors allocated
 * @q_vectors: Array of queue vectors
 * @q_vector_idxs: Starting index of queue vectors
 * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
 * @noirq_dyn_ctl_ena: value to write to the above to enable it
 * @noirq_v_idx: ID of the NOIRQ vector
 * @max_mtu: device given max possible MTU
 * @default_mac_addr: device will give a default MAC to use
 * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
 * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
 * @port_stats: per port csum, header split, and other offload stats
 * @link_up: True if link is up
 * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
 * @tstamp_config: The Tx tstamp config
 * @tstamp_task: Tx timestamping task
 */
struct idpf_vport {
	u16 num_txq;
	u16 num_complq;
	u32 txq_desc_count;
	u32 complq_desc_count;
	u32 compln_clean_budget;
	u16 num_txq_grp;
	struct idpf_txq_group *txq_grps;
	u32 txq_model;
	struct idpf_tx_queue **txqs;
	bool crc_enable;

	bool xdpsq_share;
	u16 num_xdp_txq;
	u16 xdp_txq_offset;
	struct bpf_prog *xdp_prog;

	u16 num_rxq;
	u16 num_bufq;
	u32 rxq_desc_count;
	u8 num_bufqs_per_qgrp;
	u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
	u16 num_rxq_grp;
	struct idpf_rxq_group *rxq_grps;
	u32 rxq_model;
	struct libeth_rx_pt *rx_ptype_lkup;

	struct iidc_rdma_vport_dev_info *vdev_info;

	struct idpf_adapter *adapter;
	struct net_device *netdev;
	DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
	u16 vport_type;
	u32 vport_id;
	u16 idx;
	bool default_vport;
	bool base_rxd;

	u16 num_q_vectors;
	struct idpf_q_vector *q_vectors;
	u16 *q_vector_idxs;

	void __iomem *noirq_dyn_ctl;
	u32 noirq_dyn_ctl_ena;
	u16 noirq_v_idx;

	u16 max_mtu;
	u8 default_mac_addr[ETH_ALEN];
	u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
	u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
	struct idpf_port_stats port_stats;

	bool link_up;

	struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
	struct kernel_hwtstamp_config tstamp_config;
	struct work_struct tstamp_task;
};

/**
 * enum idpf_user_flags - User toggled config flags
 * @__IDPF_USER_FLAG_HSPLIT: header split state
 * @__IDPF_PROMISC_UC: Unicast promiscuous mode
 * @__IDPF_PROMISC_MC: Multicast promiscuous mode
 * @__IDPF_USER_FLAGS_NBITS: Must be last
 */
enum idpf_user_flags {
	__IDPF_USER_FLAG_HSPLIT = 0U,
	__IDPF_PROMISC_UC = 32,
	__IDPF_PROMISC_MC,

	__IDPF_USER_FLAGS_NBITS,
};

/**
 * struct idpf_rss_data - Associated RSS data
 * @rss_key_size: Size of RSS hash key
 * @rss_key: RSS hash key
 * @rss_lut_size: Size of RSS lookup table
 * @rss_lut: RSS lookup table
 * @cached_lut: Used to restore previously init RSS lut
 */
struct idpf_rss_data {
	u16 rss_key_size;
	u8 *rss_key;
	u16 rss_lut_size;
	u32 *rss_lut;
	u32 *cached_lut;
};
/**
 * struct idpf_q_coalesce - User defined coalescing configuration values for
 *			    a single queue.
 * @tx_intr_mode: Dynamic TX ITR or not
 * @rx_intr_mode: Dynamic RX ITR or not
 * @tx_coalesce_usecs: TX interrupt throttling rate
 * @rx_coalesce_usecs: RX interrupt throttling rate
 *
 * Used to restore user coalescing configuration after a reset.
 */
struct idpf_q_coalesce {
	u32 tx_intr_mode;
	u32 rx_intr_mode;
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};

/**
 * struct idpf_vport_user_config_data - User defined configuration values for
 *					each vport.
 * @rss_data: See struct idpf_rss_data
 * @q_coalesce: Array of per queue coalescing data
 * @num_req_tx_qs: Number of user requested TX queues through ethtool
 * @num_req_rx_qs: Number of user requested RX queues through ethtool
 * @num_req_txq_desc: Number of user requested TX queue descriptors through
 *		      ethtool
 * @num_req_rxq_desc: Number of user requested RX queue descriptors through
 *		      ethtool
 * @xdp_prog: requested XDP program to install
 * @user_flags: User toggled config flags
 * @mac_filter_list: List of MAC filters
 * @num_fsteer_fltrs: number of flow steering filters
 * @flow_steer_list: list of flow steering filters
 *
 * Used to restore configuration after a reset as the vport will get wiped.
 */
struct idpf_vport_user_config_data {
	struct idpf_rss_data rss_data;
	struct idpf_q_coalesce *q_coalesce;
	u16 num_req_tx_qs;
	u16 num_req_rx_qs;
	u32 num_req_txq_desc;
	u32 num_req_rxq_desc;
	struct bpf_prog *xdp_prog;
	DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
	struct list_head mac_filter_list;
	u32 num_fsteer_fltrs;
	struct list_head flow_steer_list;
};

/**
 * enum idpf_vport_config_flags - Vport config flags
 * @IDPF_VPORT_REG_NETDEV: Register netdev
 * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
 * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
 */
enum idpf_vport_config_flags {
	IDPF_VPORT_REG_NETDEV,
	IDPF_VPORT_UP_REQUESTED,
	IDPF_VPORT_CONFIG_FLAGS_NBITS,
};

/**
 * struct idpf_avail_queue_info - Available queue counts
 * @avail_rxq: Available RX queues
 * @avail_txq: Available TX queues
 * @avail_bufq: Available buffer queues
 * @avail_complq: Available completion queues
 *
 * Maintain total queues available after allocating max queues to each vport.
 */
struct idpf_avail_queue_info {
	u16 avail_rxq;
	u16 avail_txq;
	u16 avail_bufq;
	u16 avail_complq;
};

/**
 * struct idpf_vector_info - Utility structure to pass function arguments as a
 *			     structure
 * @num_req_vecs: Vectors required based on the number of queues updated by the
 *		  user via ethtool
 * @num_curr_vecs: Current number of vectors, must be >= @num_req_vecs
 * @index: Relative starting index for vectors
 * @default_vport: Vectors are for default vport
 */
struct idpf_vector_info {
	u16 num_req_vecs;
	u16 num_curr_vecs;
	u16 index;
	bool default_vport;
};

/**
 * struct idpf_vector_lifo - Stack to maintain vector indexes used for vector
 *			     distribution algorithm
 * @top: Points to stack top i.e. next available vector index
 * @base: Always points to start of the free pool
 * @size: Total size of the vector stack
 * @vec_idx: Array to store all the vector indexes
 *
 * Vector stack maintains all the relative vector indexes at the *adapter*
 * level. This stack is divided into two parts: the 'default pool' and the
 * 'free pool'. The vector distribution algorithm gives priority to default
 * vports in that at least IDPF_MIN_Q_VEC vectors are allocated per default
 * vport, and the relative vector indexes for those are maintained in the
 * default pool. The free pool contains all the unallocated vector indexes,
 * which can be allocated on demand. The mailbox vector index is maintained
 * in the default pool of the stack.
 */
struct idpf_vector_lifo {
	u16 top;
	u16 base;
	u16 size;
	u16 *vec_idx;
};
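/*
 * A minimal sketch of the stack discipline implied above, assuming @top
 * indexes the next free entry and the free pool occupies [top, size). The
 * helper names are hypothetical; the real logic lives in the vector
 * distribution code:
 *
 *	static u16 vec_lifo_pop(struct idpf_vector_lifo *s)
 *	{
 *		if (s->top == s->size)
 *			return IDPF_NO_FREE_SLOT;	// stack exhausted
 *		return s->vec_idx[s->top++];
 *	}
 *
 *	static void vec_lifo_push(struct idpf_vector_lifo *s, u16 idx)
 *	{
 *		s->vec_idx[--s->top] = idx;	// return idx to the free pool
 *	}
 */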
/**
 * struct idpf_vport_config - Vport configuration data
 * @user_config: see struct idpf_vport_user_config_data
 * @max_q: Maximum possible queues
 * @req_qs_chunks: Queue chunk data for requested queues
 * @mac_filter_list_lock: Lock to protect mac filters
 * @flags: See enum idpf_vport_config_flags
 */
struct idpf_vport_config {
	struct idpf_vport_user_config_data user_config;
	struct idpf_vport_max_q max_q;
	struct virtchnl2_add_queues *req_qs_chunks;
	spinlock_t mac_filter_list_lock;
	DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};

struct idpf_vc_xn_manager;

#define idpf_for_each_vport(adapter, iter) \
	for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
	     *iter = (adapter)->max_vports ? *__##iter : NULL; \
	     iter; \
	     iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
		    *__##iter : NULL)
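/*
 * Example (illustrative): the macro declares the iterator itself; it walks
 * the vports array in order and terminates at the end of the array or at the
 * first empty (NULL) slot:
 *
 *	idpf_for_each_vport(adapter, vport) {
 *		// ... operate on each allocated vport ...
 *	}
 */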
/**
 * struct idpf_adapter - Device data struct generated on probe
 * @pdev: PCI device struct given on probe
 * @virt_ver_maj: Virtchnl version major
 * @virt_ver_min: Virtchnl version minor
 * @msg_enable: Debug message level enabled
 * @mb_wait_count: Number of times mailbox initialization has been attempted
 * @state: Init state machine
 * @flags: See enum idpf_flags
 * @reset_reg: See struct idpf_reset_reg
 * @hw: Device access data
 * @num_avail_msix: Available number of MSIX vectors
 * @num_msix_entries: Number of entries in MSIX table
 * @msix_entries: MSIX table
 * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA
 * @rdma_msix_entries: RDMA MSIX table
 * @req_vec_chunks: Requested vector chunk data
 * @mb_vector: Mailbox vector data
 * @vector_stack: Stack to store the msix vector indexes
 * @irq_mb_handler: Handler for hard interrupt for mailbox
 * @tx_timeout_count: Number of TX timeouts that have occurred
 * @avail_queues: Device given queue limits
 * @vports: Array to store vports created by the driver
 * @netdevs: Associated Vport netdevs
 * @vport_params_reqd: Vport params requested
 * @vport_params_recvd: Vport params received
 * @vport_ids: Array of device given vport identifiers
 * @vport_config: Vport config parameters
 * @max_vports: Maximum vports that can be allocated
 * @num_alloc_vports: Current number of vports allocated
 * @next_vport: Next free slot in adapter->vports[] - 0-based!
 * @init_task: Initialization task
 * @init_wq: Workqueue for initialization task
 * @serv_task: Periodically recurring maintenance task
 * @serv_wq: Workqueue for service task
 * @mbx_task: Task to handle mailbox interrupts
 * @mbx_wq: Workqueue for mailbox responses
 * @vc_event_task: Task to handle out of band virtchnl event notifications
 * @vc_event_wq: Workqueue for virtchnl events
 * @stats_task: Periodic statistics retrieval task
 * @stats_wq: Workqueue for statistics task
 * @caps: Negotiated capabilities with device
 * @vcxn_mngr: Virtchnl transaction manager
 * @dev_ops: See idpf_dev_ops
 * @cdev_info: IDC core device info pointer
 * @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
 *	     to VFs but is used to initialize them
 * @crc_enable: Enable CRC insertion offload
 * @req_tx_splitq: TX split or single queue model to request
 * @req_rx_splitq: RX split or single queue model to request
 * @vport_ctrl_lock: Lock to protect the vport control flow
 * @vector_lock: Lock to protect vector distribution
 * @queue_lock: Lock to protect queue distribution
 * @vc_buf_lock: Lock to protect virtchnl buffer
 * @ptp: Storage for PTP-related data
 */
struct idpf_adapter {
	struct pci_dev *pdev;
	u32 virt_ver_maj;
	u32 virt_ver_min;

	u32 msg_enable;
	u32 mb_wait_count;
	enum idpf_state state;
	DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
	struct idpf_reset_reg reset_reg;
	struct idpf_hw hw;
	u16 num_avail_msix;
	u16 num_msix_entries;
	struct msix_entry *msix_entries;
	u16 num_rdma_msix_entries;
	struct msix_entry *rdma_msix_entries;
	struct virtchnl2_alloc_vectors *req_vec_chunks;
	struct idpf_q_vector mb_vector;
	struct idpf_vector_lifo vector_stack;
	irqreturn_t (*irq_mb_handler)(int irq, void *data);

	u32 tx_timeout_count;
	struct idpf_avail_queue_info avail_queues;
	struct idpf_vport **vports;
	struct net_device **netdevs;
	struct virtchnl2_create_vport **vport_params_reqd;
	struct virtchnl2_create_vport **vport_params_recvd;
	u32 *vport_ids;

	struct idpf_vport_config **vport_config;
	u16 max_vports;
	u16 num_alloc_vports;
	u16 next_vport;

	struct delayed_work init_task;
	struct workqueue_struct *init_wq;
	struct delayed_work serv_task;
	struct workqueue_struct *serv_wq;
	struct delayed_work mbx_task;
	struct workqueue_struct *mbx_wq;
	struct delayed_work vc_event_task;
	struct workqueue_struct *vc_event_wq;
	struct delayed_work stats_task;
	struct workqueue_struct *stats_wq;
	struct virtchnl2_get_capabilities caps;
	struct idpf_vc_xn_manager *vcxn_mngr;

	struct idpf_dev_ops dev_ops;
	struct iidc_rdma_core_dev_info *cdev_info;
	int num_vfs;
	bool crc_enable;
	bool req_tx_splitq;
	bool req_rx_splitq;

	struct mutex vport_ctrl_lock;
	struct mutex vector_lock;
	struct mutex queue_lock;
	struct mutex vc_buf_lock;

	struct idpf_ptp *ptp;
};

/**
 * idpf_is_queue_model_split - check if queue model is split
 * @q_model: queue model single or split
 *
 * Returns true if queue model is split else false
 */
static inline bool idpf_is_queue_model_split(u16 q_model)
{
	return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
	       q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}

/**
 * idpf_xdp_enabled - check if an XDP program is installed on a vport
 * @vport: vport to check
 */
static inline bool idpf_xdp_enabled(const struct idpf_vport *vport)
{
	return vport->adapter && vport->xdp_prog;
}
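/*
 * Illustrative only: code that differs between the two queuing models
 * typically branches on the negotiated model, e.g.:
 *
 *	if (idpf_is_queue_model_split(vport->txq_model))
 *		;	// splitq: completions arrive on a completion queue
 *	else
 *		;	// singleq: descriptors are written back in place
 *
 * With CONFIG_IDPF_SINGLEQ disabled, the helper is constant-true and the
 * singleq branches compile out.
 */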
#define idpf_is_cap_ena(adapter, field, flag) \
	idpf_is_capability_ena(adapter, false, field, flag)
#define idpf_is_cap_ena_all(adapter, field, flag) \
	idpf_is_capability_ena(adapter, true, field, flag)

bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
			    enum idpf_cap_field field, u64 flag);

/**
 * idpf_is_rdma_cap_ena - Determine if RDMA is supported
 * @adapter: private data struct
 *
 * Return: true if RDMA capability is enabled, false otherwise
 */
static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
{
	return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA);
}
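/*
 * Example (illustrative): idpf_is_cap_ena() asks whether any of the given
 * flags is set in the selected capability field, while idpf_is_cap_ena_all()
 * requires all of them, using the IDPF_CAP_* sets defined below:
 *
 *	// true if at least one RX checksum offload was negotiated
 *	if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
 *		...
 *
 *	// true only if both IPv4 and IPv6 TCP RSC were negotiated
 *	if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
 *		...
 */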
#define IDPF_CAP_RSS (\
	VIRTCHNL2_FLOW_IPV4_TCP		|\
	VIRTCHNL2_FLOW_IPV4_UDP		|\
	VIRTCHNL2_FLOW_IPV4_SCTP	|\
	VIRTCHNL2_FLOW_IPV4_OTHER	|\
	VIRTCHNL2_FLOW_IPV6_TCP		|\
	VIRTCHNL2_FLOW_IPV6_UDP		|\
	VIRTCHNL2_FLOW_IPV6_SCTP	|\
	VIRTCHNL2_FLOW_IPV6_OTHER)

#define IDPF_CAP_RSC (\
	VIRTCHNL2_CAP_RSC_IPV4_TCP	|\
	VIRTCHNL2_CAP_RSC_IPV6_TCP)

#define IDPF_CAP_HSPLIT	(\
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|\
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)

#define IDPF_CAP_TX_CSUM_L4V4 (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)

#define IDPF_CAP_TX_CSUM_L4V6 (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)

#define IDPF_CAP_RX_CSUM (\
	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)

#define IDPF_CAP_TX_SCTP_CSUM (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)

#define IDPF_CAP_TUNNEL_TX_CSUM (\
	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL)

/**
 * idpf_get_reserved_vecs - Get reserved vectors
 * @adapter: private data struct
 */
static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.num_allocated_vectors);
}

/**
 * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors
 * @adapter: private data struct
 *
 * Return: number of vectors reserved for RDMA
 */
static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors);
}

/**
 * idpf_get_default_vports - Get default number of vports
 * @adapter: private data struct
 */
static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.default_num_vports);
}

/**
 * idpf_get_max_vports - Get max number of vports
 * @adapter: private data struct
 */
static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.max_vports);
}

/**
 * idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device
 * @adapter: private data struct
 */
static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter)
{
	return adapter->caps.max_sg_bufs_per_tx_pkt;
}

/**
 * idpf_get_min_tx_pkt_len - Get min packet length supported by the device
 * @adapter: private data struct
 */
static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
{
	u8 pkt_len = adapter->caps.min_sso_packet_len;

	return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
}

/**
 * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Return: BAR0 mailbox register address based on register offset.
 */
static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
						  resource_size_t reg_offset)
{
	return adapter->hw.mbx.vaddr + reg_offset;
}

/**
 * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Return: BAR0 rstat register address based on register offset.
 */
static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
						    resource_size_t reg_offset)
{
	reg_offset -= adapter->dev_ops.static_reg_info[1].start;

	return adapter->hw.rstat.vaddr + reg_offset;
}

/**
 * idpf_get_reg_addr - Get BAR0 register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Based on the register offset, return the actual BAR0 register address
 */
static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
					      resource_size_t reg_offset)
{
	struct idpf_hw *hw = &adapter->hw;

	for (int i = 0; i < hw->num_lan_regs; i++) {
		struct idpf_mmio_reg *region = &hw->lan_regs[i];

		if (reg_offset >= region->addr_start &&
		    reg_offset < (region->addr_start + region->addr_len)) {
			/* Convert the offset so that it is relative to the
			 * start of the region. Then add the base address of
			 * the region to get the final address.
			 */
			reg_offset -= region->addr_start;

			return region->vaddr + reg_offset;
		}
	}

	/* It's impossible to hit this case with offsets from the CP. But if we
	 * do for any other reason, the kernel will panic on that register
	 * access. Might as well do it here to make it clear what's happening.
	 */
	BUG();

	return NULL;
}
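/*
 * Worked example: given a region with addr_start = 0x2000, addr_len = 0x1000
 * and kernel mapping vaddr, a request for reg_offset 0x2100 falls inside the
 * region and resolves to vaddr + 0x100.
 */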
/**
 * idpf_is_reset_detected - check if we were reset at some point
 * @adapter: driver specific private structure
 *
 * Returns true if we are either in reset currently or were previously reset.
 */
static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
{
	if (!adapter->hw.arq)
		return true;

	return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
		 adapter->hw.arq->reg.len_mask);
}

/**
 * idpf_is_reset_in_prog - check if reset is in progress
 * @adapter: driver specific private structure
 *
 * Returns true if hard reset is in progress, false otherwise
 */
static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter)
{
	return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) ||
		test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
		test_bit(IDPF_HR_DRV_LOAD, adapter->flags));
}

/**
 * idpf_netdev_to_vport - get a vport handle from a netdev
 * @netdev: network interface device structure
 */
static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return np->vport;
}

/**
 * idpf_netdev_to_adapter - Get adapter handle from a netdev
 * @netdev: Network interface device structure
 */
static inline struct idpf_adapter *idpf_netdev_to_adapter(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return np->adapter;
}

/**
 * idpf_is_feature_ena - Determine if a particular feature is enabled
 * @vport: Vport to check
 * @feature: Netdev flag to check
 *
 * Returns true or false if a particular feature is enabled.
 */
static inline bool idpf_is_feature_ena(const struct idpf_vport *vport,
				       netdev_features_t feature)
{
	return vport->netdev->features & feature;
}

/**
 * idpf_get_max_tx_hdr_size - get the size of tx header
 * @adapter: Driver specific private structure
 */
static inline u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.max_tx_hdr_size);
}

/**
 * idpf_vport_ctrl_lock - Acquire the vport control lock
 * @netdev: Network interface device structure
 *
 * This lock should be used by non-datapath code to protect against vport
 * destruction.
 */
static inline void idpf_vport_ctrl_lock(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	mutex_lock(&np->adapter->vport_ctrl_lock);
}

/**
 * idpf_vport_ctrl_unlock - Release the vport control lock
 * @netdev: Network interface device structure
 */
static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	mutex_unlock(&np->adapter->vport_ctrl_lock);
}
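/*
 * Typical (illustrative) usage from a non-datapath context such as an
 * ethtool callback; the vport cannot be destroyed while the lock is held:
 *
 *	idpf_vport_ctrl_lock(netdev);
 *	vport = idpf_netdev_to_vport(netdev);
 *	// ... reconfigure the vport ...
 *	idpf_vport_ctrl_unlock(netdev);
 */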
void idpf_statistics_task(struct work_struct *work);
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
			     enum idpf_vport_reset_cause reset_cause);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
				u16 *q_vector_idxs,
				struct idpf_vector_info *vec_info);
void idpf_set_ethtool_ops(struct net_device *netdev);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
			       u16 itr, bool tx);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);

u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
int idpf_idc_init(struct idpf_adapter *adapter);
int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
			       enum iidc_function_type ftype);
void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info);
void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info);
void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
			     enum iidc_rdma_event_type event_type);

int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
				struct virtchnl2_flow_rule_add_del *rule,
				enum virtchnl2_op opcode);
#endif /* !_IDPF_H_ */