/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_H_
#define _IDPF_H_

/* Forward declarations */
struct idpf_adapter;
struct idpf_vport;
struct idpf_vport_max_q;

#include <net/pkt_sched.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>
#include <linux/dim.h>

#include "virtchnl2.h"
#include "idpf_lan_txrx.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"

#define GETMAXVAL(num_bits)	GENMASK((num_bits) - 1, 0)

#define IDPF_NO_FREE_SLOT	0xffff

/* Default Mailbox settings */
#define IDPF_NUM_FILTERS_PER_MSG	20
#define IDPF_NUM_DFLT_MBX_Q		2	/* includes both TX and RX */
#define IDPF_DFLT_MBX_Q_LEN		64
#define IDPF_DFLT_MBX_ID		-1
/* maximum number of times to try before resetting mailbox */
#define IDPF_MB_MAX_ERR			20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz)	\
	((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))

#define IDPF_MAX_WAIT			500

/* available message levels */
#define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IDPF_DIM_PROFILE_SLOTS	5

#define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2
#define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0

/**
 * struct idpf_mac_filter - MAC filter list entry
 * @list: list member field
 * @macaddr: MAC address
 * @remove: filter should be removed (virtchnl)
 * @add: filter should be added (virtchnl)
 */
struct idpf_mac_filter {
	struct list_head list;
	u8 macaddr[ETH_ALEN];
	bool remove;
	bool add;
};

/**
 * enum idpf_state - State machine to handle bring up
 * @__IDPF_VER_CHECK: Negotiate virtchnl version
 * @__IDPF_GET_CAPS: Negotiate capabilities
 * @__IDPF_INIT_SW: Init based on given capabilities
 * @__IDPF_STATE_LAST: Must be last, used to determine size
 */
enum idpf_state {
	__IDPF_VER_CHECK,
	__IDPF_GET_CAPS,
	__IDPF_INIT_SW,
	__IDPF_STATE_LAST,
};

/**
 * enum idpf_flags - Adapter flags, including hard reset causes
 * @IDPF_HR_FUNC_RESET: Hard reset when TxRx timeout
 * @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW
 * @IDPF_HR_RESET_IN_PROG: Reset in progress
 * @IDPF_REMOVE_IN_PROG: Driver remove in progress
 * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
 * @IDPF_VC_CORE_INIT: virtchnl core has been init
 * @IDPF_FLAGS_NBITS: Must be last
 */
enum idpf_flags {
	IDPF_HR_FUNC_RESET,
	IDPF_HR_DRV_LOAD,
	IDPF_HR_RESET_IN_PROG,
	IDPF_REMOVE_IN_PROG,
	IDPF_MB_INTR_MODE,
	IDPF_VC_CORE_INIT,
	IDPF_FLAGS_NBITS,
};

/**
 * enum idpf_cap_field - Offsets into capabilities struct for specific caps
 * @IDPF_BASE_CAPS: generic base capabilities
 * @IDPF_CSUM_CAPS: checksum offload capabilities
 * @IDPF_SEG_CAPS: segmentation offload capabilities
 * @IDPF_RSS_CAPS: RSS offload capabilities
 * @IDPF_HSPLIT_CAPS: Header split capabilities
 * @IDPF_RSC_CAPS: RSC offload capabilities
 * @IDPF_OTHER_CAPS: miscellaneous offloads
 *
 * Used when checking for a specific capability flag. Since the different
 * capability sets are not mutually exclusive numerically, the caller must
 * specify which type of capability they are checking for.
 */
enum idpf_cap_field {
	IDPF_BASE_CAPS		= -1,
	IDPF_CSUM_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   csum_caps),
	IDPF_SEG_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   seg_caps),
	IDPF_RSS_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   rss_caps),
	IDPF_HSPLIT_CAPS	= offsetof(struct virtchnl2_get_capabilities,
					   hsplit_caps),
	IDPF_RSC_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   rsc_caps),
	IDPF_OTHER_CAPS		= offsetof(struct virtchnl2_get_capabilities,
					   other_caps),
};
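
/*
 * Illustrative sketch (not the driver's exact helper): these offsets let a
 * single routine locate any capability word generically. Assuming e.g. that
 * @csum_caps is a little-endian 32-bit field, a check could look like:
 *
 *	const u8 *caps = (const u8 *)&adapter->caps;
 *	u32 csum = le32_to_cpu(*(const __le32 *)(caps + IDPF_CSUM_CAPS));
 *	bool ena = csum & VIRTCHNL2_CAP_RX_CSUM_L3_IPV4;
 *
 * IDPF_BASE_CAPS is a sentinel (-1) rather than a real offset; base
 * capabilities are handled separately.
 */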

/**
 * enum idpf_vport_state - Current vport state
 * @__IDPF_VPORT_DOWN: Vport is down
 * @__IDPF_VPORT_UP: Vport is up
 * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
 */
enum idpf_vport_state {
	__IDPF_VPORT_DOWN,
	__IDPF_VPORT_UP,
	__IDPF_VPORT_STATE_LAST,
};

/**
 * struct idpf_netdev_priv - Struct to store vport back pointer
 * @adapter: Adapter back pointer
 * @vport: Vport back pointer
 * @vport_id: Vport identifier
 * @vport_idx: Relative vport index
 * @state: See enum idpf_vport_state
 * @netstats: Packet and byte stats
 * @stats_lock: Lock to protect stats update
 */
struct idpf_netdev_priv {
	struct idpf_adapter *adapter;
	struct idpf_vport *vport;
	u32 vport_id;
	u16 vport_idx;
	enum idpf_vport_state state;
	struct rtnl_link_stats64 netstats;
	spinlock_t stats_lock;
};

/**
 * struct idpf_reset_reg - Reset register offsets/masks
 * @rstat: Reset status register
 * @rstat_m: Reset status mask
 */
struct idpf_reset_reg {
	void __iomem *rstat;
	u32 rstat_m;
};

/**
 * struct idpf_vport_max_q - Queue limits
 * @max_rxq: Maximum number of RX queues supported
 * @max_txq: Maximum number of TX queues supported
 * @max_bufq: In splitq, maximum number of buffer queues supported
 * @max_complq: In splitq, maximum number of completion queues supported
 */
struct idpf_vport_max_q {
	u16 max_rxq;
	u16 max_txq;
	u16 max_bufq;
	u16 max_complq;
};

/**
 * struct idpf_reg_ops - Device specific register operation function pointers
 * @ctlq_reg_init: Mailbox control queue register initialization
 * @intr_reg_init: Traffic interrupt register initialization
 * @mb_intr_reg_init: Mailbox interrupt register initialization
 * @reset_reg_init: Reset register initialization
 * @trigger_reset: Trigger a reset to occur
 */
struct idpf_reg_ops {
	void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
	int (*intr_reg_init)(struct idpf_vport *vport);
	void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
	void (*reset_reg_init)(struct idpf_adapter *adapter);
	void (*trigger_reset)(struct idpf_adapter *adapter,
			      enum idpf_flags trig_cause);
};

/**
 * struct idpf_dev_ops - Device specific operations
 * @reg_ops: Register operations
 */
struct idpf_dev_ops {
	struct idpf_reg_ops reg_ops;
};

/**
 * enum idpf_vport_reset_cause - Vport soft reset causes
 * @IDPF_SR_Q_CHANGE: Soft reset queue change
 * @IDPF_SR_Q_DESC_CHANGE: Soft reset descriptor change
 * @IDPF_SR_MTU_CHANGE: Soft reset MTU change
 * @IDPF_SR_RSC_CHANGE: Soft reset RSC change
 */
enum idpf_vport_reset_cause {
	IDPF_SR_Q_CHANGE,
	IDPF_SR_Q_DESC_CHANGE,
	IDPF_SR_MTU_CHANGE,
	IDPF_SR_RSC_CHANGE,
};
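
/*
 * Illustrative only: a configuration change from an ndo or ethtool callback
 * is typically applied by requesting a soft reset with the matching cause,
 * e.g. on an MTU change:
 *
 *	err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
 *
 * which tears the queues down and rebuilds them with the new settings.
 */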

/**
 * enum idpf_vport_flags - Vport flags
 * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
 * @IDPF_VPORT_SW_MARKER: Set while the TX pipe is drained with software
 *			  marker packets; cleared once their processing is
 *			  done
 * @IDPF_VPORT_FLAGS_NBITS: Must be last
 */
enum idpf_vport_flags {
	IDPF_VPORT_DEL_QUEUES,
	IDPF_VPORT_SW_MARKER,
	IDPF_VPORT_FLAGS_NBITS,
};

/**
 * struct idpf_port_stats - Per port offload statistics
 * @stats_sync: Synchronization for stats updates
 * @rx_hw_csum_err: RX hardware checksum errors
 * @rx_hsplit: RX packets that were header split
 * @rx_hsplit_hbo: RX header split header buffer overflows
 * @rx_bad_descs: Bad RX descriptors received
 * @tx_linearize: TX packets that had to be linearized
 * @tx_busy: TX ring busy events
 * @tx_drops: Dropped TX packets
 * @tx_dma_map_errs: TX DMA mapping errors
 * @vport_stats: Vport statistics reported by the device
 */
struct idpf_port_stats {
	struct u64_stats_sync stats_sync;
	u64_stats_t rx_hw_csum_err;
	u64_stats_t rx_hsplit;
	u64_stats_t rx_hsplit_hbo;
	u64_stats_t rx_bad_descs;
	u64_stats_t tx_linearize;
	u64_stats_t tx_busy;
	u64_stats_t tx_drops;
	u64_stats_t tx_dma_map_errs;
	struct virtchnl2_vport_stats vport_stats;
};
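
/*
 * Illustrative sketch of a hotpath update using the standard u64_stats
 * pattern (ps is a hypothetical struct idpf_port_stats pointer):
 *
 *	u64_stats_update_begin(&ps->stats_sync);
 *	u64_stats_inc(&ps->tx_drops);
 *	u64_stats_update_end(&ps->stats_sync);
 */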

/**
 * struct idpf_vport - Handle for netdevices and queue resources
 * @num_txq: Number of allocated TX queues
 * @num_complq: Number of allocated completion queues
 * @txq_desc_count: TX queue descriptor count
 * @complq_desc_count: Completion queue descriptor count
 * @compln_clean_budget: Work budget for completion clean
 * @num_txq_grp: Number of TX queue groups
 * @txq_grps: Array of TX queue groups
 * @txq_model: Split queue or single queue queuing model
 * @txqs: Used only in hotpath to get to the right queue very fast
 * @crc_enable: Enable CRC insertion offload
 * @num_rxq: Number of allocated RX queues
 * @num_bufq: Number of allocated buffer queues
 * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
 *		    to complete all buffer descriptors for all buffer queues in
 *		    the worst case.
 * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
 * @bufq_desc_count: Buffer queue descriptor count
 * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
 * @num_rxq_grp: Number of RX queue groups
 * @rxq_grps: Array of RX queue groups. Number of groups times number of RX
 *	      queues per group yields the total number of RX queues.
 * @rxq_model: Splitq queue or single queue queuing model
 * @rx_ptype_lkup: Lookup table for ptypes on RX
 * @adapter: back pointer to associated adapter
 * @netdev: Associated net_device. Each vport should have one and only one
 *	    associated netdev.
 * @flags: See enum idpf_vport_flags
 * @vport_type: Default SRIOV, SIOV, etc.
 * @vport_id: Device given vport identifier
 * @idx: Software index in adapter vports struct
 * @default_vport: Use this vport if one isn't specified
 * @base_rxd: True if the driver should use base descriptors instead of flex
 * @num_q_vectors: Number of IRQ vectors allocated
 * @q_vectors: Array of queue vectors
 * @q_vector_idxs: Starting index of queue vectors
 * @max_mtu: device given max possible MTU
 * @default_mac_addr: device will give a default MAC to use
 * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
 * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
 * @port_stats: per port csum, header split, and other offload stats
 * @link_up: True if link is up
 * @link_speed_mbps: Link speed in Mbps
 * @sw_marker_wq: wait queue for marker packets
 */
struct idpf_vport {
	u16 num_txq;
	u16 num_complq;
	u32 txq_desc_count;
	u32 complq_desc_count;
	u32 compln_clean_budget;
	u16 num_txq_grp;
	struct idpf_txq_group *txq_grps;
	u32 txq_model;
	struct idpf_queue **txqs;
	bool crc_enable;

	u16 num_rxq;
	u16 num_bufq;
	u32 rxq_desc_count;
	u8 num_bufqs_per_qgrp;
	u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
	u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP];
	u16 num_rxq_grp;
	struct idpf_rxq_group *rxq_grps;
	u32 rxq_model;
	struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE];

	struct idpf_adapter *adapter;
	struct net_device *netdev;
	DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
	u16 vport_type;
	u32 vport_id;
	u16 idx;
	bool default_vport;
	bool base_rxd;

	u16 num_q_vectors;
	struct idpf_q_vector *q_vectors;
	u16 *q_vector_idxs;
	u16 max_mtu;
	u8 default_mac_addr[ETH_ALEN];
	u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
	u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
	struct idpf_port_stats port_stats;

	bool link_up;
	u32 link_speed_mbps;

	wait_queue_head_t sw_marker_wq;
};
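
/*
 * Illustrative only: queue teardown waits on @sw_marker_wq until the marker
 * bit clears, along the lines of:
 *
 *	wait_event(vport->sw_marker_wq,
 *		   !test_bit(IDPF_VPORT_SW_MARKER, vport->flags));
 */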

/**
 * enum idpf_user_flags - User toggled configuration flags
 * @__IDPF_USER_FLAG_HSPLIT: header split state
 * @__IDPF_PROMISC_UC: Unicast promiscuous mode
 * @__IDPF_PROMISC_MC: Multicast promiscuous mode
 * @__IDPF_USER_FLAGS_NBITS: Must be last
 */
enum idpf_user_flags {
	__IDPF_USER_FLAG_HSPLIT = 0U,
	__IDPF_PROMISC_UC = 32,
	__IDPF_PROMISC_MC,

	__IDPF_USER_FLAGS_NBITS,
};

/**
 * struct idpf_rss_data - Associated RSS data
 * @rss_key_size: Size of RSS hash key
 * @rss_key: RSS hash key
 * @rss_lut_size: Size of RSS lookup table
 * @rss_lut: RSS lookup table
 * @cached_lut: Used to restore previously init RSS lut
 */
struct idpf_rss_data {
	u16 rss_key_size;
	u8 *rss_key;
	u16 rss_lut_size;
	u32 *rss_lut;
	u32 *cached_lut;
};

/**
 * struct idpf_vport_user_config_data - User defined configuration values for
 *					each vport.
 * @rss_data: See struct idpf_rss_data
 * @num_req_tx_qs: Number of user requested TX queues through ethtool
 * @num_req_rx_qs: Number of user requested RX queues through ethtool
 * @num_req_txq_desc: Number of user requested TX queue descriptors through
 *		      ethtool
 * @num_req_rxq_desc: Number of user requested RX queue descriptors through
 *		      ethtool
 * @user_flags: User toggled config flags
 * @mac_filter_list: List of MAC filters
 *
 * Used to restore configuration after a reset as the vport will get wiped.
 */
struct idpf_vport_user_config_data {
	struct idpf_rss_data rss_data;
	u16 num_req_tx_qs;
	u16 num_req_rx_qs;
	u32 num_req_txq_desc;
	u32 num_req_rxq_desc;
	DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
	struct list_head mac_filter_list;
};

/**
 * enum idpf_vport_config_flags - Vport config flags
 * @IDPF_VPORT_REG_NETDEV: Register netdev
 * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
 * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
 */
enum idpf_vport_config_flags {
	IDPF_VPORT_REG_NETDEV,
	IDPF_VPORT_UP_REQUESTED,
	IDPF_VPORT_CONFIG_FLAGS_NBITS,
};

/**
 * struct idpf_avail_queue_info - Available queue counts
 * @avail_rxq: Available RX queues
 * @avail_txq: Available TX queues
 * @avail_bufq: Available buffer queues
 * @avail_complq: Available completion queues
 *
 * Maintain total queues available after allocating max queues to each vport.
 */
struct idpf_avail_queue_info {
	u16 avail_rxq;
	u16 avail_txq;
	u16 avail_bufq;
	u16 avail_complq;
};

/**
 * struct idpf_vector_info - Utility structure to pass function arguments as a
 *			     structure
 * @num_req_vecs: Vectors required based on the number of queues updated by the
 *		  user via ethtool
 * @num_curr_vecs: Current number of vectors, must be >= @num_req_vecs
 * @index: Relative starting index for vectors
 * @default_vport: Vectors are for default vport
 */
struct idpf_vector_info {
	u16 num_req_vecs;
	u16 num_curr_vecs;
	u16 index;
	bool default_vport;
};
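
/*
 * Illustrative only: when the user changes the channel count via ethtool,
 * the new demand is packed into struct idpf_vector_info and handed to
 * idpf_req_rel_vector_indexes() (declared below) to grow or shrink the
 * vport's share of MSIX vectors.
 */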

/**
 * struct idpf_vector_lifo - Stack to maintain vector indexes used for vector
 *			     distribution algorithm
 * @top: Points to stack top, i.e. the next available vector index
 * @base: Always points to start of the free pool
 * @size: Total size of the vector stack
 * @vec_idx: Array to store all the vector indexes
 *
 * Vector stack maintains all the relative vector indexes at the *adapter*
 * level. This stack is divided into two parts: the 'default pool' and the
 * 'free pool'. The vector distribution algorithm gives priority to default
 * vports in that at least IDPF_MIN_Q_VEC vectors are allocated per default
 * vport, and the relative vector indexes for those are maintained in the
 * default pool. The free pool contains all the unallocated vector indexes,
 * which can be allocated on demand. The mailbox vector index is maintained
 * in the default pool of the stack.
 */
struct idpf_vector_lifo {
	u16 top;
	u16 base;
	u16 size;
	u16 *vec_idx;
};

/**
 * struct idpf_vport_config - Vport configuration data
 * @user_config: see struct idpf_vport_user_config_data
 * @max_q: Maximum possible queues
 * @req_qs_chunks: Queue chunk data for requested queues
 * @mac_filter_list_lock: Lock to protect mac filters
 * @flags: See enum idpf_vport_config_flags
 */
struct idpf_vport_config {
	struct idpf_vport_user_config_data user_config;
	struct idpf_vport_max_q max_q;
	struct virtchnl2_add_queues *req_qs_chunks;
	spinlock_t mac_filter_list_lock;
	DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};

struct idpf_vc_xn_manager;
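
/*
 * Conceptual sketch of the stack discipline (assumed semantics, for
 * illustration only): pop hands out the index at @top and advances it into
 * the free pool; push returns a released index and moves @top back. Indexes
 * in the default pool, between the start of @vec_idx and @base, stay
 * reserved for the default vports and the mailbox rather than being handed
 * out on demand.
 */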

/**
 * struct idpf_adapter - Device data struct generated on probe
 * @pdev: PCI device struct given on probe
 * @virt_ver_maj: Virtchnl version major
 * @virt_ver_min: Virtchnl version minor
 * @msg_enable: Debug message level enabled
 * @mb_wait_count: Number of times mailbox initialization has been attempted
 * @state: Init state machine
 * @flags: See enum idpf_flags
 * @reset_reg: See struct idpf_reset_reg
 * @hw: Device access data
 * @num_req_msix: Requested number of MSIX vectors
 * @num_avail_msix: Available number of MSIX vectors
 * @num_msix_entries: Number of entries in MSIX table
 * @msix_entries: MSIX table
 * @req_vec_chunks: Requested vector chunk data
 * @mb_vector: Mailbox vector data
 * @vector_stack: Stack to store the msix vector indexes
 * @irq_mb_handler: Handler for hard interrupt for mailbox
 * @tx_timeout_count: Number of TX timeouts that have occurred
 * @avail_queues: Device given queue limits
 * @vports: Array to store vports created by the driver
 * @netdevs: Associated Vport netdevs
 * @vport_params_reqd: Vport params requested
 * @vport_params_recvd: Vport params received
 * @vport_ids: Array of device given vport identifiers
 * @vport_config: Vport config parameters
 * @max_vports: Maximum vports that can be allocated
 * @num_alloc_vports: Current number of vports allocated
 * @next_vport: Next free slot in pf->vport[] - 0-based!
 * @init_task: Initialization task
 * @init_wq: Workqueue for initialization task
 * @serv_task: Periodically recurring maintenance task
 * @serv_wq: Workqueue for service task
 * @mbx_task: Task to handle mailbox interrupts
 * @mbx_wq: Workqueue for mailbox responses
 * @vc_event_task: Task to handle out of band virtchnl event notifications
 * @vc_event_wq: Workqueue for virtchnl events
 * @stats_task: Periodic statistics retrieval task
 * @stats_wq: Workqueue for statistics task
 * @caps: Negotiated capabilities with device
 * @vcxn_mngr: Virtchnl transaction manager
 * @dev_ops: See idpf_dev_ops
 * @num_vfs: Number of VFs allocated through sysfs. The PF does not directly
 *	     talk to the VFs, but the count is used to initialize them.
 * @crc_enable: Enable CRC insertion offload
 * @req_tx_splitq: TX split or single queue model to request
 * @req_rx_splitq: RX split or single queue model to request
 * @vport_ctrl_lock: Lock to protect the vport control flow
 * @vector_lock: Lock to protect vector distribution
 * @queue_lock: Lock to protect queue distribution
 * @vc_buf_lock: Lock to protect virtchnl buffer
 */
struct idpf_adapter {
	struct pci_dev *pdev;
	u32 virt_ver_maj;
	u32 virt_ver_min;

	u32 msg_enable;
	u32 mb_wait_count;
	enum idpf_state state;
	DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
	struct idpf_reset_reg reset_reg;
	struct idpf_hw hw;
	u16 num_req_msix;
	u16 num_avail_msix;
	u16 num_msix_entries;
	struct msix_entry *msix_entries;
	struct virtchnl2_alloc_vectors *req_vec_chunks;
	struct idpf_q_vector mb_vector;
	struct idpf_vector_lifo vector_stack;
	irqreturn_t (*irq_mb_handler)(int irq, void *data);

	u32 tx_timeout_count;
	struct idpf_avail_queue_info avail_queues;
	struct idpf_vport **vports;
	struct net_device **netdevs;
	struct virtchnl2_create_vport **vport_params_reqd;
	struct virtchnl2_create_vport **vport_params_recvd;
	u32 *vport_ids;

	struct idpf_vport_config **vport_config;
	u16 max_vports;
	u16 num_alloc_vports;
	u16 next_vport;

	struct delayed_work init_task;
	struct workqueue_struct *init_wq;
	struct delayed_work serv_task;
	struct workqueue_struct *serv_wq;
	struct delayed_work mbx_task;
	struct workqueue_struct *mbx_wq;
	struct delayed_work vc_event_task;
	struct workqueue_struct *vc_event_wq;
	struct delayed_work stats_task;
	struct workqueue_struct *stats_wq;
	struct virtchnl2_get_capabilities caps;
	struct idpf_vc_xn_manager *vcxn_mngr;

	struct idpf_dev_ops dev_ops;
	int num_vfs;
	bool crc_enable;
	bool req_tx_splitq;
	bool req_rx_splitq;

	struct mutex vport_ctrl_lock;
	struct mutex vector_lock;
	struct mutex queue_lock;
	struct mutex vc_buf_lock;
};

/**
 * idpf_is_queue_model_split - check if queue model is split
 * @q_model: queue model single or split
 *
 * Returns true if queue model is split else false
 */
static inline bool idpf_is_queue_model_split(u16 q_model)
{
	return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}

#define idpf_is_cap_ena(adapter, field, flag) \
	idpf_is_capability_ena(adapter, false, field, flag)
#define idpf_is_cap_ena_all(adapter, field, flag) \
	idpf_is_capability_ena(adapter, true, field, flag)

bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
			    enum idpf_cap_field field, u64 flag);
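
/*
 * Illustrative usage (hypothetical caller): idpf_is_cap_ena() is satisfied
 * by any one matching flag, while idpf_is_cap_ena_all() requires every flag
 * in the mask, e.g.:
 *
 *	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
 *		netdev->hw_features |= NETIF_F_RXHASH;
 */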

#define IDPF_CAP_RSS (\
	VIRTCHNL2_CAP_RSS_IPV4_TCP	|\
	VIRTCHNL2_CAP_RSS_IPV4_UDP	|\
	VIRTCHNL2_CAP_RSS_IPV4_SCTP	|\
	VIRTCHNL2_CAP_RSS_IPV4_OTHER	|\
	VIRTCHNL2_CAP_RSS_IPV6_TCP	|\
	VIRTCHNL2_CAP_RSS_IPV6_UDP	|\
	VIRTCHNL2_CAP_RSS_IPV6_SCTP	|\
	VIRTCHNL2_CAP_RSS_IPV6_OTHER)

#define IDPF_CAP_RSC (\
	VIRTCHNL2_CAP_RSC_IPV4_TCP	|\
	VIRTCHNL2_CAP_RSC_IPV6_TCP)

#define IDPF_CAP_HSPLIT	(\
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4	|\
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)

#define IDPF_CAP_RX_CSUM_L4V4 (\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)

#define IDPF_CAP_RX_CSUM_L4V6 (\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)

#define IDPF_CAP_RX_CSUM (\
	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)

#define IDPF_CAP_SCTP_CSUM (\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	|\
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)

#define IDPF_CAP_TUNNEL_TX_CSUM (\
	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	|\
	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL)

/**
 * idpf_get_reserved_vecs - Get reserved vectors
 * @adapter: private data struct
 */
static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.num_allocated_vectors);
}

/**
 * idpf_get_default_vports - Get default number of vports
 * @adapter: private data struct
 */
static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.default_num_vports);
}

/**
 * idpf_get_max_vports - Get max number of vports
 * @adapter: private data struct
 */
static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.max_vports);
}

/**
 * idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device
 * @adapter: private data struct
 */
static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter)
{
	return adapter->caps.max_sg_bufs_per_tx_pkt;
}

/**
 * idpf_get_min_tx_pkt_len - Get min packet length supported by the device
 * @adapter: private data struct
 */
static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
{
	u8 pkt_len = adapter->caps.min_sso_packet_len;

	return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
}

/**
 * idpf_get_reg_addr - Get BAR0 register address
 * @adapter: private data struct
 * @reg_offset: register offset value
 *
 * Based on the register offset, return the actual BAR0 register address
 */
static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
					      resource_size_t reg_offset)
{
	return (void __iomem *)(adapter->hw.hw_addr + reg_offset);
}
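
/*
 * Illustrative only (reg_off is a hypothetical offset): a 32-bit BAR0
 * register at byte offset reg_off would be read through this helper as:
 *
 *	val = readl(idpf_get_reg_addr(adapter, reg_off));
 */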

/**
 * idpf_is_reset_detected - check if we were reset at some point
 * @adapter: driver specific private structure
 *
 * Returns true if we are either in reset currently or were previously reset.
 */
static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
{
	if (!adapter->hw.arq)
		return true;

	return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) &
		 adapter->hw.arq->reg.len_mask);
}

/**
 * idpf_is_reset_in_prog - check if reset is in progress
 * @adapter: driver specific private structure
 *
 * Returns true if hard reset is in progress, false otherwise
 */
static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter)
{
	return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) ||
		test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
		test_bit(IDPF_HR_DRV_LOAD, adapter->flags));
}

/**
 * idpf_netdev_to_vport - get a vport handle from a netdev
 * @netdev: network interface device structure
 */
static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return np->vport;
}

/**
 * idpf_netdev_to_adapter - Get adapter handle from a netdev
 * @netdev: Network interface device structure
 */
static inline struct idpf_adapter *idpf_netdev_to_adapter(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	return np->adapter;
}

/**
 * idpf_is_feature_ena - Determine if a particular feature is enabled
 * @vport: Vport to check
 * @feature: Netdev flag to check
 *
 * Returns true or false if a particular feature is enabled.
 */
static inline bool idpf_is_feature_ena(const struct idpf_vport *vport,
				       netdev_features_t feature)
{
	return vport->netdev->features & feature;
}

/**
 * idpf_get_max_tx_hdr_size - Get the max TX header size
 * @adapter: Driver specific private structure
 */
static inline u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter)
{
	return le16_to_cpu(adapter->caps.max_tx_hdr_size);
}
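
/*
 * Illustrative only: RX paths typically gate hardware GRO/RSC handling on
 * the negotiated netdev feature, e.g.:
 *
 *	if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
 *		handle the RSC completion;
 */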

/**
 * idpf_vport_ctrl_lock - Acquire the vport control lock
 * @netdev: Network interface device structure
 *
 * This lock should be used by non-datapath code to protect against vport
 * destruction.
 */
static inline void idpf_vport_ctrl_lock(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	mutex_lock(&np->adapter->vport_ctrl_lock);
}

/**
 * idpf_vport_ctrl_unlock - Release the vport control lock
 * @netdev: Network interface device structure
 */
static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
{
	struct idpf_netdev_priv *np = netdev_priv(netdev);

	mutex_unlock(&np->adapter->vport_ctrl_lock);
}

void idpf_statistics_task(struct work_struct *work);
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
			     enum idpf_vport_reset_cause reset_cause);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
				u16 *q_vector_idxs,
				struct idpf_vector_info *vec_info);
void idpf_set_ethtool_ops(struct net_device *netdev);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
			       u16 itr, bool tx);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);

u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);

#endif /* !_IDPF_H_ */