/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2022, Intel Corporation. */

#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_

#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/overflow.h>
#include <uapi/linux/if_ether.h>

/* Description:
 * This header file describes the Virtual Function (VF) - Physical Function
 * (PF) communication protocol used by the drivers for all devices starting
 * from our 40G product line
 *
 * Admin queue buffer usage:
 * desc->opcode is always aqc_opc_send_msg_to_pf
 * flags, retval, datalen, and data addr are all used normally.
 * The Firmware copies the cookie fields when sending messages between the
 * PF and VF, but uses all other fields internally. Due to this limitation,
 * we must send all messages as "indirect", i.e. using an external buffer.
 *
 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
 * have a maximum of sixteen queues for all of its VSIs.
 *
 * The PF is required to return a status code in v_retval for all messages
 * except RESET_VF, which does not require any response. The returned value
 * is of virtchnl_status_code type, defined here.
 *
 * In general, VF driver initialization should roughly follow the order of
 * these opcodes. The VF driver must first validate the API version of the
 * PF driver, then request a reset, then get resources, then configure
 * queues and interrupts. After these operations are complete, the VF
 * driver may start its queues, optionally add MAC and VLAN filters, and
 * process traffic.
 */

/* START GENERIC DEFINES
 * Need to ensure the following enums and defines hold the same meaning and
 * value in current and future projects
 */

/* Error Codes */
enum virtchnl_status_code {
	VIRTCHNL_STATUS_SUCCESS = 0,
	VIRTCHNL_STATUS_ERR_PARAM = -5,
	VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
	VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};

/* Backward compatibility */
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED

#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT		0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT		0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT	0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT		0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT		0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT		0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT		0x6
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT		0x7

enum virtchnl_link_speed {
	VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
	VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
	VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
	VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
	VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
	VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
	VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
	VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
	VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};

/* for hsplit_0 field of Rx HMC context */
/* deprecated with AVF 1.0 */
enum virtchnl_rx_hsplit {
	VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
	VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
	VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};

enum virtchnl_bw_limit_type {
	VIRTCHNL_BW_SHAPER = 0,
};
/* END GENERIC DEFINES */

/* Opcodes for VF-PF communication. These are placed in the v_opcode field
 * of the virtchnl_msg structure.
 */
enum virtchnl_ops {
/* The PF sends status change events to VFs using
 * the VIRTCHNL_OP_EVENT opcode.
 * VFs send requests to the PF using the other ops.
 * Use of "advanced opcode" features must be negotiated as part of capabilities
 * exchange and is not considered part of the base mode feature set.
 */
	VIRTCHNL_OP_UNKNOWN = 0,
	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
	VIRTCHNL_OP_RESET_VF = 2,
	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
	VIRTCHNL_OP_ENABLE_QUEUES = 8,
	VIRTCHNL_OP_DISABLE_QUEUES = 9,
	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
	VIRTCHNL_OP_ADD_VLAN = 12,
	VIRTCHNL_OP_DEL_VLAN = 13,
	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
	VIRTCHNL_OP_GET_STATS = 15,
	VIRTCHNL_OP_RSVD = 16,
	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
	VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
	/* opcode 19 is reserved */
	VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
	VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
	VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
	VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
	VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
	VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS = 25,
	VIRTCHNL_OP_SET_RSS_HASHCFG = 26,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
	VIRTCHNL_OP_REQUEST_QUEUES = 29,
	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
	/* opcode 34 - 43 are reserved */
	VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
	VIRTCHNL_OP_ADD_RSS_CFG = 45,
	VIRTCHNL_OP_DEL_RSS_CFG = 46,
	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
	VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
	VIRTCHNL_OP_ADD_VLAN_V2 = 52,
	VIRTCHNL_OP_DEL_VLAN_V2 = 53,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
	VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
	/* opcode 58 and 59 are reserved */
	VIRTCHNL_OP_1588_PTP_GET_CAPS = 60,
	VIRTCHNL_OP_1588_PTP_GET_TIME = 61,
	/* opcode 62 - 65 are reserved */
	VIRTCHNL_OP_GET_QOS_CAPS = 66,
	/* opcode 68 through 111 are reserved */
	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
	VIRTCHNL_OP_CONFIG_QUANTA = 113,
	VIRTCHNL_OP_MAX,
};

/* These macros are used to generate compilation errors if a structure/union
 * is not exactly the correct length. It gives a divide by zero error if the
 * structure/union is not of the correct size, otherwise it creates an enum
 * that is never used.
 */
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }

/* Message descriptions and data structures. */

/* VIRTCHNL_OP_VERSION
 * VF posts its version number to the PF. PF responds with its version number
 * in the same format, along with a return code.
 * Reply from PF has its major/minor versions also in param0 and param1.
 * If there is a major version mismatch, then the VF cannot operate.
 * If there is a minor version mismatch, then the VF can operate but should
 * add a warning to the system log.
 *
 * This enum element MUST always be specified as == 1, regardless of other
 * changes in the API. The PF must always respond to this message without
 * error regardless of version mismatch.
 */
#define VIRTCHNL_VERSION_MAJOR		1
#define VIRTCHNL_VERSION_MINOR		1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0

struct virtchnl_version_info {
	u32 major;
	u32 minor;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);

#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
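
/* Example (illustrative sketch, not part of this header's ABI): after
 * receiving the PF's reply to VIRTCHNL_OP_VERSION, a VF driver might
 * validate it roughly as follows, where @pf_ver is a hypothetical local
 * copy of the reply payload:
 *
 *	struct virtchnl_version_info pf_ver = ...; // from the PF reply buffer
 *
 *	if (pf_ver.major != VIRTCHNL_VERSION_MAJOR)
 *		return -EOPNOTSUPP;	// major mismatch: cannot operate
 *	if (!VF_IS_V11(&pf_ver))
 *		pr_warn("virtchnl: PF speaks 1.%u, using 1.0 behavior\n",
 *			pf_ver.minor);	// minor mismatch: warn and continue
 */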

/* VIRTCHNL_OP_RESET_VF
 * VF sends this request to PF with no parameters
 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
 * until reset completion is indicated. The admin queue must be reinitialized
 * after this operation.
 *
 * When reset is complete, PF must ensure that all queues in all VSIs associated
 * with the VF are stopped, all queue configurations in the HMC are set to 0,
 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
 * are cleared.
 */

/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
 * vsi_type should always be 6 for backward compatibility. Add other fields
 * as needed.
 */
enum virtchnl_vsi_type {
	VIRTCHNL_VSI_TYPE_INVALID = 0,
	VIRTCHNL_VSI_SRIOV = 6,
};

/* VIRTCHNL_OP_GET_VF_RESOURCES
 * Version 1.0 VF sends this request to PF with no parameters
 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
 * PF responds with an indirect message containing
 * virtchnl_vf_resource and one or more
 * virtchnl_vsi_resource structures.
 */

struct virtchnl_vsi_resource {
	u16 vsi_id;
	u16 num_queue_pairs;

	/* see enum virtchnl_vsi_type */
	s32 vsi_type;
	u16 qset_handle;
	u8 default_mac_addr[ETH_ALEN];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);

/* VF capability flags
 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
 */
#define VIRTCHNL_VF_OFFLOAD_L2			BIT(0)
#define VIRTCHNL_VF_OFFLOAD_RDMA		BIT(1)
#define VIRTCHNL_VF_CAP_RDMA			VIRTCHNL_VF_OFFLOAD_RDMA
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		BIT(3)
#define VIRTCHNL_VF_OFFLOAD_RSS_REG		BIT(4)
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		BIT(5)
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
#define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
#define VIRTCHNL_VF_OFFLOAD_TC_U32		BIT(11)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	BIT(18)
#define VIRTCHNL_VF_OFFLOAD_RSS_PF		BIT(19)
#define VIRTCHNL_VF_OFFLOAD_ENCAP		BIT(20)
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		BIT(21)
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	BIT(22)
#define VIRTCHNL_VF_OFFLOAD_ADQ			BIT(23)
#define VIRTCHNL_VF_OFFLOAD_USO			BIT(25)
#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	BIT(26)
#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		BIT(27)
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
#define VIRTCHNL_VF_OFFLOAD_QOS			BIT(29)
#define VIRTCHNL_VF_CAP_PTP			BIT(31)

#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
			       VIRTCHNL_VF_OFFLOAD_VLAN | \
			       VIRTCHNL_VF_OFFLOAD_RSS_PF)

struct virtchnl_vf_resource {
	u16 num_vsis;
	u16 num_queue_pairs;
	u16 max_vectors;
	u16 max_mtu;

	u32 vf_cap_flags;
	u32 rss_key_size;
	u32 rss_lut_size;

	struct virtchnl_vsi_resource vsi_res[];
};

VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
#define virtchnl_vf_resource_LEGACY_SIZEOF	36
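
/* Example (illustrative sketch, not part of this header's ABI): a version 1.1
 * VF might request its capabilities and then inspect the PF's reply roughly
 * as follows; @caps and @res are hypothetical local names and the transport
 * call is elided:
 *
 *	u32 caps = VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 *		   VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
 *
 *	// send VIRTCHNL_OP_GET_VF_RESOURCES with &caps as the payload ...
 *	struct virtchnl_vf_resource *res = ...; // indirect reply buffer
 *
 *	if (!(res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 *		; // fall back to VIRTCHNL_VF_OFFLOAD_RSS_REG/RSS_AQ paths
 *	// res->vsi_res[0] describes the first (usually only) VSI
 */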

/* VIRTCHNL_OP_CONFIG_TX_QUEUE
 * VF sends this message to set up parameters for one TX queue.
 * External data buffer contains one instance of virtchnl_txq_info.
 * PF configures requested queue and returns a status code.
 */

/* Tx queue config info */
struct virtchnl_txq_info {
	u16 vsi_id;
	u16 queue_id;
	u16 ring_len;		/* number of descriptors, multiple of 8 */
	u16 headwb_enabled;	/* deprecated with AVF 1.0 */
	u64 dma_ring_addr;
	u64 dma_headwb_addr;	/* deprecated with AVF 1.0 */
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);

/* RX descriptor IDs (range from 0 to 63) */
enum virtchnl_rx_desc_ids {
	VIRTCHNL_RXDID_0_16B_BASE = 0,
	VIRTCHNL_RXDID_1_32B_BASE = 1,
	VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
	VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
	VIRTCHNL_RXDID_7_HW_RSVD = 7,
	/* 8 through 15 are reserved */
	VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 = 19,
	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW = 20,
	VIRTCHNL_RXDID_21_COMMS_AUX_TCP = 21,
	/* 22 through 63 are reserved */
};

#define VIRTCHNL_RXDID_BIT(x)	BIT_ULL(VIRTCHNL_RXDID_##x)

/* RX descriptor ID bitmasks */
enum virtchnl_rx_desc_id_bitmasks {
	VIRTCHNL_RXDID_0_16B_BASE_M = VIRTCHNL_RXDID_BIT(0_16B_BASE),
	VIRTCHNL_RXDID_1_32B_BASE_M = VIRTCHNL_RXDID_BIT(1_32B_BASE),
	VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL_RXDID_BIT(2_FLEX_SQ_NIC),
	VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = VIRTCHNL_RXDID_BIT(3_FLEX_SQ_SW),
	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = VIRTCHNL_RXDID_BIT(4_FLEX_SQ_NIC_VEB),
	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M = VIRTCHNL_RXDID_BIT(5_FLEX_SQ_NIC_ACL),
	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M = VIRTCHNL_RXDID_BIT(6_FLEX_SQ_NIC_2),
	VIRTCHNL_RXDID_7_HW_RSVD_M = VIRTCHNL_RXDID_BIT(7_HW_RSVD),
	/* 8 through 15 are reserved */
	VIRTCHNL_RXDID_16_COMMS_GENERIC_M = VIRTCHNL_RXDID_BIT(16_COMMS_GENERIC),
	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M = VIRTCHNL_RXDID_BIT(17_COMMS_AUX_VLAN),
	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M = VIRTCHNL_RXDID_BIT(18_COMMS_AUX_IPV4),
	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M = VIRTCHNL_RXDID_BIT(19_COMMS_AUX_IPV6),
	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M = VIRTCHNL_RXDID_BIT(20_COMMS_AUX_FLOW),
	VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M = VIRTCHNL_RXDID_BIT(21_COMMS_AUX_TCP),
	/* 22 through 63 are reserved */
};

/* virtchnl_rxq_info_flags - definition of bits in the flags field of the
 * virtchnl_rxq_info structure.
 *
 * @VIRTCHNL_PTP_RX_TSTAMP: request to enable Rx timestamping
 *
 * Other flag bits are currently reserved and they may be extended in the
 * future.
 */
enum virtchnl_rxq_info_flags {
	VIRTCHNL_PTP_RX_TSTAMP = BIT(0),
};

/* VIRTCHNL_OP_CONFIG_RX_QUEUE
 * VF sends this message to set up parameters for one RX queue.
 * External data buffer contains one instance of virtchnl_rxq_info.
 * PF configures requested queue and returns a status code. The
 * crc_disable flag disables CRC stripping on the VF: setting it to 1
 * disables CRC stripping for each queue in the VF where the flag is set.
 * The VIRTCHNL_VF_OFFLOAD_CRC offload must have been negotiated prior to
 * sending this info or the PF will ignore the request. This flag should
 * be set the same for all of the queues for a VF.
 */

/* Rx queue config info */
struct virtchnl_rxq_info {
	u16 vsi_id;
	u16 queue_id;
	u32 ring_len;		/* number of descriptors, multiple of 32 */
	u16 hdr_size;
	u16 splithdr_enabled;	/* deprecated with AVF 1.0 */
	u32 databuffer_size;
	u32 max_pkt_size;
	u8 crc_disable;
	/* see enum virtchnl_rx_desc_ids;
	 * only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported. Note
	 * that when the offload is not supported, the descriptor format aligns
	 * with VIRTCHNL_RXDID_1_32B_BASE.
	 */
	enum virtchnl_rx_desc_ids rxdid:8;
	enum virtchnl_rxq_info_flags flags:8; /* see virtchnl_rxq_info_flags */
	u8 pad1;
	u64 dma_ring_addr;

	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
	s32 rx_split_pos;
	u32 pad2;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);

/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
 * VF sends this message to set parameters for all active TX and RX queues
 * associated with the specified VSI.
 * PF configures queues and returns status.
 * If the number of queues specified is greater than the number of queues
 * associated with the VSI, an error is returned and no queues are configured.
 * NOTE: The VF is not required to configure all queues in a single request.
 * It may send multiple messages. PF drivers must correctly handle all VF
 * requests.
 */
struct virtchnl_queue_pair_info {
	/* NOTE: vsi_id and queue_id should be identical for both queues. */
	struct virtchnl_txq_info txq;
	struct virtchnl_rxq_info rxq;
};

VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);

struct virtchnl_vsi_queue_config_info {
	u16 vsi_id;
	u16 num_queue_pairs;
	u32 pad;
	struct virtchnl_queue_pair_info qpair[];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
#define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF	72
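
/* Example (illustrative sketch, not part of this header's ABI): a VF driver
 * might build a VIRTCHNL_OP_CONFIG_VSI_QUEUES message for @num_qp queue pairs
 * roughly as follows; allocation and the transport call are elided and the
 * local names are hypothetical:
 *
 *	struct virtchnl_vsi_queue_config_info *vqci;
 *	size_t len = virtchnl_struct_size(vqci, qpair, num_qp);
 *
 *	vqci = ...; // zeroed buffer of len bytes
 *	vqci->vsi_id = vsi_id;
 *	vqci->num_queue_pairs = num_qp;
 *	for (i = 0; i < num_qp; i++) {
 *		vqci->qpair[i].txq.vsi_id = vsi_id;
 *		vqci->qpair[i].txq.queue_id = i;
 *		vqci->qpair[i].txq.ring_len = tx_ring_len;	// multiple of 8
 *		vqci->qpair[i].txq.dma_ring_addr = tx_dma[i];
 *		vqci->qpair[i].rxq.vsi_id = vsi_id;
 *		vqci->qpair[i].rxq.queue_id = i;
 *		vqci->qpair[i].rxq.ring_len = rx_ring_len;	// multiple of 32
 *		vqci->qpair[i].rxq.databuffer_size = rx_buf_len;
 *		vqci->qpair[i].rxq.max_pkt_size = max_frame;
 *		vqci->qpair[i].rxq.dma_ring_addr = rx_dma[i];
 *	}
 *	// send VIRTCHNL_OP_CONFIG_VSI_QUEUES with vqci, len
 */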

/* VIRTCHNL_OP_REQUEST_QUEUES
 * VF sends this message to request the PF to allocate additional queues to
 * this VF. Each VF gets a guaranteed number of queues on init but asking for
 * additional queues must be negotiated. This is a best effort request as it
 * is possible the PF does not have enough queues left to support the request.
 * If the PF cannot support the number requested it will respond with the
 * maximum number it is able to support. If the request is successful, PF will
 * then reset the VF to institute required changes.
 */

/* VF resource request */
struct virtchnl_vf_res_request {
	u16 num_queue_pairs;
};

/* VIRTCHNL_OP_CONFIG_IRQ_MAP
 * VF uses this message to map vectors to queues.
 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
 * are to be associated with the specified vector.
 * The "other" causes are always mapped to vector 0. The VF may not request
 * that vector 0 be used for traffic.
 * PF configures interrupt mapping and returns status.
 * NOTE: due to hardware requirements, all active queues (both TX and RX)
 * should be mapped to interrupts, even if the driver intends to operate
 * only in polling mode. In this case the interrupt may be disabled, but
 * the ITR timer will still run to trigger writebacks.
 */
struct virtchnl_vector_map {
	u16 vsi_id;
	u16 vector_id;
	u16 rxq_map;
	u16 txq_map;
	u16 rxitr_idx;
	u16 txitr_idx;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);

struct virtchnl_irq_map_info {
	u16 num_vectors;
	struct virtchnl_vector_map vecmap[];
};

VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
#define virtchnl_irq_map_info_LEGACY_SIZEOF	14

/* VIRTCHNL_OP_ENABLE_QUEUES
 * VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
 * The queues fields are bitmaps indicating which queues to act upon.
 * (Currently, we only support 16 queues per VF, but we make the field
 * u32 to allow for expansion.)
 * PF performs requested action and returns status.
 * NOTE: The VF is not required to enable/disable all queues in a single
 * request. It may send multiple messages.
 * PF drivers must correctly handle all VF requests.
 */
struct virtchnl_queue_select {
	u16 vsi_id;
	u16 pad;
	u32 rx_queues;
	u32 tx_queues;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
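
/* Example (illustrative sketch, not part of this header's ABI): enabling the
 * first @num_qp Tx/Rx queue pairs of a VSI could look roughly like this,
 * with hypothetical local names and the transport call elided:
 *
 *	struct virtchnl_queue_select vqs = {};
 *
 *	vqs.vsi_id = vsi_id;
 *	vqs.rx_queues = BIT(num_qp) - 1;	// e.g. 4 pairs -> 0xf
 *	vqs.tx_queues = vqs.rx_queues;
 *	// send VIRTCHNL_OP_ENABLE_QUEUES with &vqs, sizeof(vqs)
 */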

/* VIRTCHNL_OP_ADD_ETH_ADDR
 * VF sends this message in order to add one or more unicast or multicast
 * address filters for the specified VSI.
 * PF adds the filters and returns status.
 */

/* VIRTCHNL_OP_DEL_ETH_ADDR
 * VF sends this message in order to remove one or more unicast or multicast
 * filters for the specified VSI.
 * PF removes the filters and returns status.
 */

/* VIRTCHNL_ETHER_ADDR_LEGACY
 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
 * bytes. Moving forward all VF drivers should not set type to
 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
 * behavior. The control plane function (i.e. PF) can use a best effort method
 * of tracking the primary/device unicast in this case, but there is no
 * guarantee and functionality depends on the implementation of the PF.
 */

/* VIRTCHNL_ETHER_ADDR_PRIMARY
 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
 * function (i.e. PF) to accurately track and use this MAC address for
 * displaying on the host and for VM/function reset.
 */

/* VIRTCHNL_ETHER_ADDR_EXTRA
 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
 * unicast and/or multicast filters that are being added/deleted via
 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
 */
struct virtchnl_ether_addr {
	u8 addr[ETH_ALEN];
	u8 type;
#define VIRTCHNL_ETHER_ADDR_LEGACY	0
#define VIRTCHNL_ETHER_ADDR_PRIMARY	1
#define VIRTCHNL_ETHER_ADDR_EXTRA	2
#define VIRTCHNL_ETHER_ADDR_TYPE_MASK	3 /* first two bits of type are valid */
	u8 pad;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);

struct virtchnl_ether_addr_list {
	u16 vsi_id;
	u16 num_elements;
	struct virtchnl_ether_addr list[];
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
#define virtchnl_ether_addr_list_LEGACY_SIZEOF	12
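
/* Example (illustrative sketch, not part of this header's ABI): adding the
 * VF's primary MAC address via VIRTCHNL_OP_ADD_ETH_ADDR might look roughly
 * like this, with hypothetical local names and allocation/transport elided:
 *
 *	struct virtchnl_ether_addr_list *veal;
 *	size_t len = virtchnl_struct_size(veal, list, 1);
 *
 *	veal = ...; // zeroed buffer of len bytes
 *	veal->vsi_id = vsi_id;
 *	veal->num_elements = 1;
 *	ether_addr_copy(veal->list[0].addr, netdev->dev_addr);
 *	veal->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
 *	// send VIRTCHNL_OP_ADD_ETH_ADDR with veal, len
 */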

/* VIRTCHNL_OP_ADD_VLAN
 * VF sends this message to add one or more VLAN tag filters for receives.
 * PF adds the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

/* VIRTCHNL_OP_DEL_VLAN
 * VF sends this message to remove one or more VLAN tag filters for receives.
 * PF removes the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

struct virtchnl_vlan_filter_list {
	u16 vsi_id;
	u16 num_elements;
	u16 vlan_id[];
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
#define virtchnl_vlan_filter_list_LEGACY_SIZEOF	6

/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
 * structures and opcodes.
 *
 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
 *
 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
 *
 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
 * by the PF concurrently. For example, if the PF can support
 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
 * would OR the following bits:
 *
 *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *	VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
 * and 0x88A8 VLAN ethertypes.
 *
 * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
 * supported by the PF concurrently. For example if the PF can support
 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
 * offload it would OR the following bits:
 *
 *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
 *
 * The VF would interpret this as VLAN stripping can be supported on either
 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
 * the previously set value.
 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
 *
 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
 * VLAN filtering if the underlying PF supports it.
 *
 * VIRTCHNL_VLAN_TOGGLE_ALLOWED - This field is used to say whether a
 * certain VLAN capability can be toggled. For example if the underlying PF/CP
 * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
 * set this bit along with the supported ethertypes.
 */
enum virtchnl_vlan_support {
	VIRTCHNL_VLAN_UNSUPPORTED = 0,
	VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
	VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
	VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
	VIRTCHNL_VLAN_PRIO = BIT(24),
	VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
	VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
	VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
	VIRTCHNL_VLAN_TOGGLE = BIT(31),
};

/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
 * for filtering, insertion, and stripping capabilities.
 *
 * If only outer capabilities are supported (for filtering, insertion, and/or
 * stripping) then this refers to the outer most or single VLAN from the VF's
 * perspective.
 *
 * If only inner capabilities are supported (for filtering, insertion, and/or
 * stripping) then this refers to the outer most or single VLAN from the VF's
 * perspective. Functionally this is the same as if only outer capabilities are
 * supported. The VF driver is just forced to use the inner fields when
 * adding/deleting filters and enabling/disabling offloads (if supported).
 *
 * If both outer and inner capabilities are supported (for filtering, insertion,
 * and/or stripping) then outer refers to the outer most or single VLAN and
 * inner refers to the second VLAN, if it exists, in the packet.
 *
 * There is no support for tunneled VLAN offloads, so outer or inner are never
 * referring to a tunneled packet from the VF's perspective.
 */
struct virtchnl_vlan_supported_caps {
	u32 outer;
	u32 inner;
};

/* The PF populates these fields based on the supported VLAN filtering. If a
 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
 * the unsupported fields.
 *
 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
 * VIRTCHNL_VLAN_TOGGLE bit is set.
 *
 * The ethertype(s) specified in the ethertype_init field are the ethertypes
 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
 * most VLAN from the VF's perspective. If both inner and outer filtering are
 * allowed then ethertype_init only refers to the outer most VLAN as the only
 * VLAN ethertype supported for inner VLAN filtering is
 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
 * when both inner and outer filtering are allowed.
 *
 * The max_filters field tells the VF how many VLAN filters it's allowed to have
 * at any one time. If it exceeds this amount and tries to add another filter,
 * then the request will be rejected by the PF. To prevent failures, the VF
 * should keep track of how many VLAN filters it has added and not attempt to
 * add more than max_filters.
 */
struct virtchnl_vlan_filtering_caps {
	struct virtchnl_vlan_supported_caps filtering_support;
	u32 ethertype_init;
	u16 max_filters;
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);

/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
 * if the PF supports a different ethertype for stripping and insertion.
 *
 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
 * for stripping affect the ethertype(s) specified for insertion and vice versa
 * as well. If the VF tries to configure VLAN stripping via
 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
 * that will be the ethertype for both stripping and insertion.
 *
 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
 * stripping do not affect the ethertype(s) specified for insertion and vice
 * versa.
 */
enum virtchnl_vlan_ethertype_match {
	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
};

/* The PF populates these fields based on the supported VLAN offloads. If a
 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
 *
 * Also, a VF is only allowed to toggle its VLAN offload setting if the
 * VIRTCHNL_VLAN_TOGGLE_ALLOWED bit is set.
 *
 * The VF driver needs to be aware of how the tags are stripped by hardware and
 * inserted by the VF driver based on the level of offload support. The PF will
 * populate these fields based on where the VLAN tags are expected to be
 * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
 * interpret these fields. See the definition of the
 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
 * enumeration.
 */
struct virtchnl_vlan_offload_caps {
	struct virtchnl_vlan_supported_caps stripping_support;
	struct virtchnl_vlan_supported_caps insertion_support;
	u32 ethertype_init;
	u8 ethertype_match;
	u8 pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);

/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
 * VF sends this message to determine its VLAN capabilities.
 *
 * PF will mark which capabilities it supports based on hardware support and
 * current configuration. For example, if a port VLAN is configured the PF will
 * not allow outer VLAN filtering, stripping, or insertion to be configured so
 * it will block these features from the VF.
 *
 * The VF will need to cross reference its capabilities with the PF's
 * capabilities in the response message from the PF to determine the VLAN
 * support.
 */
struct virtchnl_vlan_caps {
	struct virtchnl_vlan_filtering_caps filtering;
	struct virtchnl_vlan_offload_caps offloads;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);

struct virtchnl_vlan {
	u16 tci;	/* tci[15:13] = PCP and tci[11:0] = VID */
	u16 tci_mask;	/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
			 * filtering caps
			 */
	u16 tpid;	/* 0x8100, 0x88a8, etc. and only type(s) set in
			 * filtering caps. Note that tpid here does not refer to
			 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
			 * actual 2-byte VLAN TPID
			 */
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);

struct virtchnl_vlan_filter {
	struct virtchnl_vlan inner;
	struct virtchnl_vlan outer;
	u8 pad[16];
};

VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);

/* VIRTCHNL_OP_ADD_VLAN_V2
 * VIRTCHNL_OP_DEL_VLAN_V2
 *
 * VF sends these messages to add/del one or more VLAN tag filters for Rx
 * traffic.
 *
 * The PF attempts to add the filters and returns status.
 *
 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
 */
struct virtchnl_vlan_filter_list_v2 {
	u16 vport_id;
	u16 num_elements;
	u8 pad[4];
	struct virtchnl_vlan_filter filters[];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
#define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF	40
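
/* Example (illustrative sketch, not part of this header's ABI): assuming the
 * negotiated caps only allow inner 0x8100 filtering, adding a filter for
 * VLAN ID @vid via VIRTCHNL_OP_ADD_VLAN_V2 might look roughly like this,
 * with hypothetical local names and allocation/transport elided:
 *
 *	struct virtchnl_vlan_filter_list_v2 *vfl;
 *	size_t len = virtchnl_struct_size(vfl, filters, 1);
 *
 *	vfl = ...; // zeroed buffer of len bytes
 *	vfl->vport_id = vsi_id;
 *	vfl->num_elements = 1;
 *	vfl->filters[0].inner.tci = vid;	// VID in tci[11:0]
 *	vfl->filters[0].inner.tpid = ETH_P_8021Q;
 *	// send VIRTCHNL_OP_ADD_VLAN_V2 with vfl, len
 */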

/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
 *
 * VF sends this message to enable or disable VLAN stripping or insertion. It
 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
 * allowed and whether or not it's allowed to enable/disable the specific
 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
 * messages are allowed.
 *
 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
 * case means the outer most or single VLAN from the VF's perspective. This is
 * because no outer offloads are supported. See the comments above the
 * virtchnl_vlan_supported_caps structure for more details.
 *
 * virtchnl_vlan_caps.offloads.stripping_support.inner =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100;
 *
 * virtchnl_vlan_caps.offloads.insertion_support.inner =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100;
 *
 * In order to enable inner (again note that in this case inner is the outer
 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
 *
 * virtchnl_vlan_setting.inner_ethertype_setting =
 *			VIRTCHNL_VLAN_ETHERTYPE_8100;
 *
 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
 * initialization.
 *
 * The reason that VLAN TPID(s) are not being used for the
 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
 * possible a device could support VLAN insertion and/or stripping offload on
 * multiple ethertypes concurrently, so this method allows a VF to request
 * multiple ethertypes in one message using the virtchnl_vlan_support
 * enumeration.
 *
 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
 * VLAN insertion and stripping simultaneously. The
 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
 * populated based on what the PF can support.
 *
 * virtchnl_vlan_caps.offloads.stripping_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * virtchnl_vlan_caps.offloads.insertion_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
 * would populate the virtchnl_vlan_setting structure in the following manner
 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
 *
 * virtchnl_vlan_setting.outer_ethertype_setting =
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
 *
 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
 * initialization.
 *
 * There is also the case where a PF and the underlying hardware can support
 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
 * offloads. The ethertypes must match for stripping and insertion.
 *
 * virtchnl_vlan_caps.offloads.stripping_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
 *
 * virtchnl_vlan_caps.offloads.insertion_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
 *
 * virtchnl_vlan_caps.offloads.ethertype_match =
 *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
 *
 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
 * populate the virtchnl_vlan_setting structure in the following manner and send
 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
 *
 * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
 *
 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
 * initialization.
 */
struct virtchnl_vlan_setting {
	u32 outer_ethertype_setting;
	u32 inner_ethertype_setting;
	u16 vport_id;
	u8 pad[6];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);

/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
 * VF sends VSI id and flags.
 * PF returns status code in retval.
 * Note: we assume that broadcast accept mode is always enabled.
 */
struct virtchnl_promisc_info {
	u16 vsi_id;
	u16 flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);

#define FLAG_VF_UNICAST_PROMISC	0x00000001
#define FLAG_VF_MULTICAST_PROMISC	0x00000002

/* VIRTCHNL_OP_GET_STATS
 * VF sends this message to request stats for the selected VSI. VF uses
 * the virtchnl_queue_select struct to specify the VSI. The queue_id
 * field is ignored by the PF.
 *
 * PF replies with struct eth_stats in an external buffer.
 */

/* VIRTCHNL_OP_CONFIG_RSS_KEY
 * VIRTCHNL_OP_CONFIG_RSS_LUT
 * VF sends these messages to configure RSS. Only supported if both PF
 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
 * configuration negotiation. If this is the case, then the RSS fields in
 * the VF resource struct are valid.
 * Both the key and LUT are initialized to 0 by the PF, meaning that
 * RSS is effectively disabled until set up by the VF.
 */
struct virtchnl_rss_key {
	u16 vsi_id;
	u16 key_len;
	u8 key[];	/* RSS hash key, packed bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
#define virtchnl_rss_key_LEGACY_SIZEOF	6

struct virtchnl_rss_lut {
	u16 vsi_id;
	u16 lut_entries;
	u8 lut[];	/* RSS lookup table */
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
#define virtchnl_rss_lut_LEGACY_SIZEOF	6
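
/* Example (illustrative sketch, not part of this header's ABI): configuring
 * the RSS key could look roughly like this, with hypothetical local names
 * and allocation/transport elided; the LUT message is built the same way
 * using struct virtchnl_rss_lut:
 *
 *	struct virtchnl_rss_key *vrk;
 *	size_t len = virtchnl_struct_size(vrk, key, key_size);
 *
 *	vrk = ...; // zeroed buffer of len bytes
 *	vrk->vsi_id = vsi_id;
 *	vrk->key_len = key_size;	// bounded by rss_key_size from the PF
 *	memcpy(vrk->key, rss_key, key_size);
 *	// send VIRTCHNL_OP_CONFIG_RSS_KEY with vrk, len
 */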

/* VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS
 * VIRTCHNL_OP_SET_RSS_HASHCFG
 * VF sends these messages to get and set the hash filter configuration for RSS.
 * By default, the PF sets these to all possible traffic types that the
 * hardware supports. The VF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 */
struct virtchnl_rss_hashcfg {
	/* Bits defined by enum libie_filter_pctype */
	u64 hashcfg;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hashcfg);

/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
	VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
};

/* VIRTCHNL_OP_CONFIG_RSS_HFUNC
 * VF sends this message to configure the RSS hash function. Only supported
 * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
 * configuration negotiation.
 * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
 * by the PF.
 */
struct virtchnl_rss_hfunc {
	u16 vsi_id;
	u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
	u32 reserved;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);

/* VIRTCHNL_OP_ENABLE_CHANNELS
 * VIRTCHNL_OP_DISABLE_CHANNELS
 * VF sends these messages to enable or disable channels based on
 * the user specified queue count and queue offset for each traffic class.
 * This struct encompasses all the information that the PF needs from
 * VF to create a channel.
 */
struct virtchnl_channel_info {
	u16 count;	/* number of queues in a channel */
	u16 offset;	/* queues in a channel start from 'offset' */
	u32 pad;
	u64 max_tx_rate;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);

struct virtchnl_tc_info {
	u32 num_tc;
	u32 pad;
	struct virtchnl_channel_info list[];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
#define virtchnl_tc_info_LEGACY_SIZEOF	24

/* VIRTCHNL_OP_ADD_CLOUD_FILTER
 * VIRTCHNL_OP_DEL_CLOUD_FILTER
 * VF sends these messages to add or delete a cloud filter based on the
 * user specified match and action filters. These structures encompass
 * all the information that the PF needs from the VF to add/delete a
 * cloud filter.
 */

struct virtchnl_l4_spec {
	u8 src_mac[ETH_ALEN];
	u8 dst_mac[ETH_ALEN];
	__be16 vlan_id;
	__be16 pad;	/* reserved for future use */
	__be32 src_ip[4];
	__be32 dst_ip[4];
	__be16 src_port;
	__be16 dst_port;
};

VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);

union virtchnl_flow_spec {
	struct virtchnl_l4_spec tcp_spec;
	u8 buffer[128];	/* reserved for future use */
};

VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);

enum virtchnl_action {
	/* action types */
	VIRTCHNL_ACTION_DROP = 0,
	VIRTCHNL_ACTION_TC_REDIRECT,
	VIRTCHNL_ACTION_PASSTHRU,
	VIRTCHNL_ACTION_QUEUE,
	VIRTCHNL_ACTION_Q_REGION,
	VIRTCHNL_ACTION_MARK,
	VIRTCHNL_ACTION_COUNT,
};

enum virtchnl_flow_type {
	/* flow types */
	VIRTCHNL_TCP_V4_FLOW = 0,
	VIRTCHNL_TCP_V6_FLOW,
};

struct virtchnl_filter {
	union virtchnl_flow_spec data;
	union virtchnl_flow_spec mask;

	/* see enum virtchnl_flow_type */
	s32 flow_type;

	/* see enum virtchnl_action */
	s32 action;
	u32 action_meta;
	u8 field_flags;
	u8 pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
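
/* Example (illustrative sketch, not part of this header's ABI): steering a
 * TCP/IPv4 flow destined to @dip/@dport to VF queue 2 with
 * VIRTCHNL_OP_ADD_CLOUD_FILTER might look roughly like this (hypothetical
 * local names, transport elided); mask bytes select which fields match:
 *
 *	struct virtchnl_filter f = {};
 *
 *	f.flow_type = VIRTCHNL_TCP_V4_FLOW;
 *	f.data.tcp_spec.dst_ip[0] = dip;	// __be32
 *	f.mask.tcp_spec.dst_ip[0] = cpu_to_be32(0xffffffff);
 *	f.data.tcp_spec.dst_port = dport;	// __be16
 *	f.mask.tcp_spec.dst_port = cpu_to_be16(0xffff);
 *	f.action = VIRTCHNL_ACTION_QUEUE;
 *	f.action_meta = 2;			// target queue index
 *	// send VIRTCHNL_OP_ADD_CLOUD_FILTER with &f, sizeof(f)
 */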

/* VIRTCHNL_OP_EVENT
 * PF sends this message to inform the VF driver of events that may affect it.
 * No direct response is expected from the VF, though it may generate other
 * messages in response to this one.
 */
enum virtchnl_event_codes {
	VIRTCHNL_EVENT_UNKNOWN = 0,
	VIRTCHNL_EVENT_LINK_CHANGE,
	VIRTCHNL_EVENT_RESET_IMPENDING,
	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};

#define PF_EVENT_SEVERITY_INFO		0
#define PF_EVENT_SEVERITY_CERTAIN_DOOM	255

struct virtchnl_pf_event {
	/* see enum virtchnl_event_codes */
	s32 event;
	union {
		/* If the PF driver does not support the new speed reporting
		 * capabilities then use link_event else use link_event_adv to
		 * get the speed and link information. The ability to understand
		 * new speeds is indicated by setting the capability flag
		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
		 * in virtchnl_vf_resource struct and can be used to determine
		 * which link event struct to use below.
		 */
		struct {
			enum virtchnl_link_speed link_speed;
			bool link_status;
			u8 pad[3];
		} link_event;
		struct {
			/* link_speed provided in Mbps */
			u32 link_speed;
			u8 link_status;
			u8 pad[3];
		} link_event_adv;
	} event_data;

	s32 severity;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
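
/* Example (illustrative sketch, not part of this header's ABI): a VF driver
 * handling VIRTCHNL_EVENT_LINK_CHANGE might pick the right union member
 * based on the negotiated capability, roughly as follows (@vpe and
 * @adv_link_speed_ok are hypothetical):
 *
 *	if (vpe->event == VIRTCHNL_EVENT_LINK_CHANGE) {
 *		if (adv_link_speed_ok) { // VIRTCHNL_VF_CAP_ADV_LINK_SPEED negotiated
 *			link_up = vpe->event_data.link_event_adv.link_status;
 *			speed_mbps = vpe->event_data.link_event_adv.link_speed;
 *		} else {
 *			link_up = vpe->event_data.link_event.link_status;
 *			speed_enum = vpe->event_data.link_event.link_speed;
 *		}
 *	}
 */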

/* used to specify if a ceq_idx or aeq_idx is invalid */
#define VIRTCHNL_RDMA_INVALID_QUEUE_IDX	0xFFFF
/* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
 * VF uses this message to request PF to map RDMA vectors to RDMA queues.
 * The request for this originates from the VF RDMA driver through
 * a client interface between VF LAN and VF RDMA driver.
 * A vector could have an AEQ and CEQ attached to it although
 * there is a single AEQ per VF RDMA instance in which case
 * most vectors will have a VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq and a valid
 * idx for ceqs. There will never be a case where there will be multiple CEQs
 * attached to a single vector.
 * PF configures interrupt mapping and returns status.
 */

struct virtchnl_rdma_qv_info {
	u32 v_idx; /* msix_vector */
	u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
	u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
	u8 itr_idx;
	u8 pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);

struct virtchnl_rdma_qvlist_info {
	u32 num_vectors;
	struct virtchnl_rdma_qv_info qv_info[];
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
#define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF	16

/* VF reset states - these are written into the RSTAT register:
 * VFGEN_RSTAT on the VF
 * When the PF initiates a reset, it writes 0
 * When the reset is complete, it writes 1
 * When the PF detects that the VF has recovered, it writes 2
 * VF checks this register periodically to determine if a reset has occurred,
 * then polls it to know when the reset is complete.
 * If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked
 * will result in 3.
 */
enum virtchnl_vfr_states {
	VIRTCHNL_VFR_INPROGRESS = 0,
	VIRTCHNL_VFR_COMPLETED,
	VIRTCHNL_VFR_VFACTIVE,
};

#define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
#define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
#define PROTO_HDR_SHIFT			5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)

/* VFs use these macros to configure each protocol header. They specify which
 * protocol headers and protocol header fields are used, based on
 * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
 * @param hdr: a struct of virtchnl_proto_hdr
 * @param hdr_type: ETH/IPV4/TCP, etc
 * @param field: SRC/DST/TEID/SPI, etc
 */
#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr)	((hdr)->field_selector)

#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))

#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
	(((hdr)->type) >> PROTO_HDR_SHIFT)
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
	((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
	(VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
	 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))

/* Protocol header type within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group of protocol headers. Each
 * logical group of protocol headers encapsulates or is encapsulated using/by
 * tunneling or encapsulation protocols for network virtualization.
 */
enum virtchnl_proto_hdr_type {
	VIRTCHNL_PROTO_HDR_NONE,
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_S_VLAN,
	VIRTCHNL_PROTO_HDR_C_VLAN,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_TCP,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_SCTP,
	VIRTCHNL_PROTO_HDR_GTPU_IP,
	VIRTCHNL_PROTO_HDR_GTPU_EH,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
	VIRTCHNL_PROTO_HDR_PPPOE,
	VIRTCHNL_PROTO_HDR_L2TPV3,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_AH,
	VIRTCHNL_PROTO_HDR_PFCP,
	VIRTCHNL_PROTO_HDR_GTPC,
	VIRTCHNL_PROTO_HDR_ECPRI,
	VIRTCHNL_PROTO_HDR_L2TPV2,
	VIRTCHNL_PROTO_HDR_PPP,
	/* IPv4 and IPv6 Fragment header types are only associated to
	 * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
	 * cannot be used independently.
	 */
	VIRTCHNL_PROTO_HDR_IPV4_FRAG,
	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
	VIRTCHNL_PROTO_HDR_GRE,
};

/* Protocol header field within a protocol header. */
enum virtchnl_proto_hdr_field {
	/* ETHER */
	VIRTCHNL_PROTO_HDR_ETH_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
	VIRTCHNL_PROTO_HDR_ETH_DST,
	VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
	/* S-VLAN */
	VIRTCHNL_PROTO_HDR_S_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
	/* C-VLAN */
	VIRTCHNL_PROTO_HDR_C_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
	/* IPV4 */
	VIRTCHNL_PROTO_HDR_IPV4_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
	VIRTCHNL_PROTO_HDR_IPV4_DST,
	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
	VIRTCHNL_PROTO_HDR_IPV4_TTL,
	VIRTCHNL_PROTO_HDR_IPV4_PROT,
	VIRTCHNL_PROTO_HDR_IPV4_CHKSUM,
	/* IPV6 */
	VIRTCHNL_PROTO_HDR_IPV6_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
	VIRTCHNL_PROTO_HDR_IPV6_DST,
	VIRTCHNL_PROTO_HDR_IPV6_TC,
	VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
	VIRTCHNL_PROTO_HDR_IPV6_PROT,
	/* IPV6 Prefix */
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,
	/* TCP */
	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
	VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
	/* UDP */
	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
	VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
	/* SCTP */
	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
	VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
	/* GTPU_IP */
	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
	/* GTPU_EH */
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
	VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
	/* PPPOE */
	VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
	/* L2TPV3 */
	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
	/* ESP */
	VIRTCHNL_PROTO_HDR_ESP_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
	/* AH */
	VIRTCHNL_PROTO_HDR_AH_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
	/* PFCP */
	VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
	VIRTCHNL_PROTO_HDR_PFCP_SEID,
	/* GTPC */
	VIRTCHNL_PROTO_HDR_GTPC_TEID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
	/* ECPRI */
	VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
	VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
	/* IPv4 Dummy Fragment */
	VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
	/* IPv6 Extension Fragment */
	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
	/* GTPU_DWN/UP */
	VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
	VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
	/* L2TPv2 */
	VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
	VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
};

struct virtchnl_proto_hdr {
	/* see enum virtchnl_proto_hdr_type */
	s32 type;
	u32 field_selector; /* a bit mask to select field for header type */
	u8 buffer[64];
	/**
	 * binary buffer in network order for specific header type.
	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
	 * header is expected to be copied into the buffer.
	 */
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);

struct virtchnl_proto_hdrs {
	u8 tunnel_level;
	u8 pad[3];
	/**
	 * specifies where the protocol headers start from.
	 * must be 0 when sending a raw packet request.
	 * 0 - from the outer layer
	 * 1 - from the first inner layer
	 * 2 - from the second inner layer
	 * ....
	 **/
	u32 count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
	union {
		struct virtchnl_proto_hdr
			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
		struct {
			u16 pkt_len;
			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
			u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
		} raw;
	};
};

VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);

struct virtchnl_rss_cfg {
	struct virtchnl_proto_hdrs proto_hdrs;	/* protocol headers */

	/* see enum virtchnl_rss_algorithm; rss algorithm type */
	s32 rss_algorithm;
	u8 reserved[128];	/* reserved for future use */
};

VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
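
/* Example (illustrative sketch, not part of this header's ABI): asking the PF
 * to hash symmetrically on IPv4 addresses plus TCP ports via
 * VIRTCHNL_OP_ADD_RSS_CFG could look roughly like this (hypothetical local
 * names, transport elided):
 *
 *	struct virtchnl_rss_cfg cfg = {};
 *	struct virtchnl_proto_hdr *hdr;
 *
 *	cfg.proto_hdrs.count = 2;
 *	hdr = &cfg.proto_hdrs.proto_hdr[0];
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 *	hdr = &cfg.proto_hdrs.proto_hdr[1];
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
 *	cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
 *	// send VIRTCHNL_OP_ADD_RSS_CFG with &cfg, sizeof(cfg)
 */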

/* action configuration for FDIR */
struct virtchnl_filter_action {
	/* see enum virtchnl_action type */
	s32 type;
	union {
		/* used for queue and qgroup action */
		struct {
			u16 index;
			u8 region;
		} queue;
		/* used for count action */
		struct {
			/* share counter ID with other flow rules */
			u8 shared;
			u32 id; /* counter ID */
		} count;
		/* used for mark action */
		u32 mark_id;
		u8 reserve[32];
	} act_conf;
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);

#define VIRTCHNL_MAX_NUM_ACTIONS	8

struct virtchnl_filter_action_set {
	/* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
	u32 count;
	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
};

VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);

/* pattern and action for FDIR rule */
struct virtchnl_fdir_rule {
	struct virtchnl_proto_hdrs proto_hdrs;
	struct virtchnl_filter_action_set action_set;
};

VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);

/* Status returned to VF after VF requests FDIR commands
 * VIRTCHNL_FDIR_SUCCESS
 * VF FDIR related request was successfully completed by the PF.
 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 * OP_ADD_FDIR_FILTER request failed because no hardware resource is available.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
 * OP_ADD_FDIR_FILTER request failed because the rule already exists.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
 * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
 * OP_DEL_FDIR_FILTER request failed because this rule doesn't exist.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
 * OP_ADD_FDIR_FILTER request failed parameter validation or is not
 * supported by the hardware.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
 * OP_ADD/DEL_FDIR_FILTER request failed because programming timed out.
 *
 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
 * OP_QUERY_FDIR_FILTER request failed parameter validation,
 * for example, the VF queried the counter of a rule that has no counter action.

/* action configuration for FDIR */
struct virtchnl_filter_action {
	/* see enum virtchnl_action type */
	s32 type;
	union {
		/* used for queue and qgroup action */
		struct {
			u16 index;
			u8 region;
		} queue;
		/* used for count action */
		struct {
			/* share counter ID with other flow rules */
			u8 shared;
			u32 id; /* counter ID */
		} count;
		/* used for mark action */
		u32 mark_id;
		u8 reserve[32];
	} act_conf;
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);

#define VIRTCHNL_MAX_NUM_ACTIONS  8

struct virtchnl_filter_action_set {
	/* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
	u32 count;
	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
};

VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);

/* pattern and action for FDIR rule */
struct virtchnl_fdir_rule {
	struct virtchnl_proto_hdrs proto_hdrs;
	struct virtchnl_filter_action_set action_set;
};

VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);

/* Status returned to VF after VF requests FDIR commands
 * VIRTCHNL_FDIR_SUCCESS
 * VF FDIR related request is successfully done by PF
 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 * OP_ADD_FDIR_FILTER request failed due to lack of hardware resources.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
 * OP_ADD_FDIR_FILTER request failed because the rule already exists.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
 * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
 * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
 * OP_ADD_FDIR_FILTER request failed parameter validation or is not
 * supported by the hardware.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
 * OP_ADD/DEL_FDIR_FILTER request failed because rule programming timed out.
 *
 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
 * OP_QUERY_FDIR_FILTER request failed parameter validation, for example,
 * the VF queried the counter of a rule that has no counter action.
 */
enum virtchnl_fdir_prgm_status {
	VIRTCHNL_FDIR_SUCCESS = 0,
	VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
	VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
	VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
	VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
	VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
};
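
/* Example: an action set that directs matching packets to a VF queue and
 * counts them, as carried inside an FDIR rule. Illustrative sketch only;
 * VIRTCHNL_ACTION_QUEUE and VIRTCHNL_ACTION_COUNT are taken from
 * enum virtchnl_action defined earlier in this file.
 *
 *	struct virtchnl_filter_action_set acts = {};
 *
 *	acts.actions[0].type = VIRTCHNL_ACTION_QUEUE;
 *	acts.actions[0].act_conf.queue.index = 3;	// target VF queue
 *
 *	acts.actions[1].type = VIRTCHNL_ACTION_COUNT;
 *	acts.actions[1].act_conf.count.shared = 0;	// private counter
 *	acts.actions[1].act_conf.count.id = 0;
 *
 *	acts.count = 2;		// must stay below VIRTCHNL_MAX_NUM_ACTIONS
 */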

/* VIRTCHNL_OP_ADD_FDIR_FILTER
 * VF sends this request to PF by filling out vsi_id,
 * validate_only and rule_cfg. PF will return flow_id
 * if the request succeeds and reports the result in status.
 */
struct virtchnl_fdir_add {
	u16 vsi_id;  /* INPUT */
	/*
	 * 1 for validating a fdir rule, 0 for creating a fdir rule.
	 * Validate and create share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.
	 */
	u16 validate_only; /* INPUT */
	u32 flow_id;       /* OUTPUT */
	struct virtchnl_fdir_rule rule_cfg; /* INPUT */

	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
	s32 status;
};

VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);

/* VIRTCHNL_OP_DEL_FDIR_FILTER
 * VF sends this request to PF by filling out vsi_id
 * and flow_id. PF reports the result in status.
 */
struct virtchnl_fdir_del {
	u16 vsi_id;  /* INPUT */
	u16 pad;
	u32 flow_id; /* INPUT */

	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
	s32 status;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
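
/* Example: adding and later removing an FDIR rule built from the header
 * stack and action set sketched above. Illustrative sketch only;
 * vf_send_msg_to_pf() is a placeholder for the driver's own send routine,
 * and vsi_id / flow_id_from_reply stand in for values the driver tracks.
 *
 *	struct virtchnl_fdir_add add = {};
 *	struct virtchnl_fdir_del del = {};
 *
 *	add.vsi_id = vsi_id;
 *	add.validate_only = 0;		// 1 would only validate the rule
 *	add.rule_cfg.proto_hdrs = hdrs;
 *	add.rule_cfg.action_set = acts;
 *	vf_send_msg_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)&add, sizeof(add));
 *	// on success the PF reply carries status == VIRTCHNL_FDIR_SUCCESS
 *	// and a flow_id identifying the rule
 *
 *	del.vsi_id = vsi_id;
 *	del.flow_id = flow_id_from_reply;
 *	vf_send_msg_to_pf(VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&del, sizeof(del));
 */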

#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP		BIT(1)
#define VIRTCHNL_1588_PTP_CAP_READ_PHC		BIT(2)

/**
 * struct virtchnl_ptp_caps - Defines the PTP caps available to the VF.
 * @caps: On send, VF sets what capabilities it requests. On reply, PF
 *        indicates what has been enabled for this VF. The PF shall not set
 *        bits which were not requested by the VF.
 * @rsvd: Reserved bits for future extension.
 *
 * Structure that defines the PTP capabilities available to the VF. The VF
 * sends VIRTCHNL_OP_1588_PTP_GET_CAPS, and must fill in the ptp_caps field
 * indicating what capabilities it is requesting. The PF will respond with the
 * same message with the virtchnl_ptp_caps structure indicating what is
 * enabled for the VF.
 *
 * VIRTCHNL_1588_PTP_CAP_RX_TSTAMP indicates that the VF receive queues have
 * receive timestamps enabled in the flexible descriptors. Note that this
 * requires a VF to also negotiate to enable advanced flexible descriptors in
 * the receive path instead of the default legacy descriptor format.
 *
 * VIRTCHNL_1588_PTP_CAP_READ_PHC indicates that the VF may read the PHC time
 * via the VIRTCHNL_OP_1588_PTP_GET_TIME command.
 *
 * Note that in the future, additional capability flags may be added which
 * indicate additional extended support. All fields marked as reserved by this
 * header will be set to zero. VF implementations should verify this to ensure
 * that future extensions do not break compatibility.
 */
struct virtchnl_ptp_caps {
	u32 caps;
	u8 rsvd[44];
};

VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);

/**
 * struct virtchnl_phc_time - Contains the 64bits of PHC clock time in ns.
 * @time: PHC time in nanoseconds
 * @rsvd: Reserved for future extension
 *
 * Structure received with VIRTCHNL_OP_1588_PTP_GET_TIME. Contains the 64bits
 * of PHC clock time in nanoseconds.
 *
 * VIRTCHNL_OP_1588_PTP_GET_TIME may be sent to request the current time of
 * the PHC. This op is available in case direct access via the PHC registers
 * is not available.
 */
struct virtchnl_phc_time {
	u64 time;
	u8 rsvd[8];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);
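
/* Example: negotiating PTP capabilities and reading the PHC. Illustrative
 * sketch only; vf_send_msg_to_pf() is a placeholder for the driver's own
 * send routine, and the PF replies arrive asynchronously with the same
 * opcodes.
 *
 *	struct virtchnl_ptp_caps caps = {};
 *	struct virtchnl_phc_time phc = {};
 *
 *	caps.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
 *		    VIRTCHNL_1588_PTP_CAP_READ_PHC;
 *	vf_send_msg_to_pf(VIRTCHNL_OP_1588_PTP_GET_CAPS, (u8 *)&caps,
 *			  sizeof(caps));
 *	// the PF reply clears any capability bit it did not grant
 *
 *	// only meaningful if VIRTCHNL_1588_PTP_CAP_READ_PHC was granted
 *	vf_send_msg_to_pf(VIRTCHNL_OP_1588_PTP_GET_TIME, (u8 *)&phc,
 *			  sizeof(phc));
 *	// the PF reply fills phc.time with the PHC time in nanoseconds
 */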
1794 */ 1795 if (msglen) 1796 valid_len = msglen; 1797 else 1798 err_msg_format = true; 1799 break; 1800 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP: 1801 break; 1802 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP: 1803 valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF; 1804 if (msglen >= valid_len) { 1805 struct virtchnl_rdma_qvlist_info *qv = 1806 (struct virtchnl_rdma_qvlist_info *)msg; 1807 1808 valid_len = virtchnl_struct_size(qv, qv_info, 1809 qv->num_vectors); 1810 } 1811 break; 1812 case VIRTCHNL_OP_CONFIG_RSS_KEY: 1813 valid_len = virtchnl_rss_key_LEGACY_SIZEOF; 1814 if (msglen >= valid_len) { 1815 struct virtchnl_rss_key *vrk = 1816 (struct virtchnl_rss_key *)msg; 1817 valid_len = virtchnl_struct_size(vrk, key, 1818 vrk->key_len); 1819 } 1820 break; 1821 case VIRTCHNL_OP_CONFIG_RSS_LUT: 1822 valid_len = virtchnl_rss_lut_LEGACY_SIZEOF; 1823 if (msglen >= valid_len) { 1824 struct virtchnl_rss_lut *vrl = 1825 (struct virtchnl_rss_lut *)msg; 1826 valid_len = virtchnl_struct_size(vrl, lut, 1827 vrl->lut_entries); 1828 } 1829 break; 1830 case VIRTCHNL_OP_CONFIG_RSS_HFUNC: 1831 valid_len = sizeof(struct virtchnl_rss_hfunc); 1832 break; 1833 case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: 1834 break; 1835 case VIRTCHNL_OP_SET_RSS_HASHCFG: 1836 valid_len = sizeof(struct virtchnl_rss_hashcfg); 1837 break; 1838 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 1839 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 1840 break; 1841 case VIRTCHNL_OP_REQUEST_QUEUES: 1842 valid_len = sizeof(struct virtchnl_vf_res_request); 1843 break; 1844 case VIRTCHNL_OP_ENABLE_CHANNELS: 1845 valid_len = virtchnl_tc_info_LEGACY_SIZEOF; 1846 if (msglen >= valid_len) { 1847 struct virtchnl_tc_info *vti = 1848 (struct virtchnl_tc_info *)msg; 1849 valid_len = virtchnl_struct_size(vti, list, 1850 vti->num_tc); 1851 if (vti->num_tc == 0) 1852 err_msg_format = true; 1853 } 1854 break; 1855 case VIRTCHNL_OP_DISABLE_CHANNELS: 1856 break; 1857 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 1858 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 1859 valid_len = sizeof(struct virtchnl_filter); 1860 break; 1861 case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: 1862 break; 1863 case VIRTCHNL_OP_ADD_RSS_CFG: 1864 case VIRTCHNL_OP_DEL_RSS_CFG: 1865 valid_len = sizeof(struct virtchnl_rss_cfg); 1866 break; 1867 case VIRTCHNL_OP_ADD_FDIR_FILTER: 1868 valid_len = sizeof(struct virtchnl_fdir_add); 1869 break; 1870 case VIRTCHNL_OP_DEL_FDIR_FILTER: 1871 valid_len = sizeof(struct virtchnl_fdir_del); 1872 break; 1873 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: 1874 break; 1875 case VIRTCHNL_OP_ADD_VLAN_V2: 1876 case VIRTCHNL_OP_DEL_VLAN_V2: 1877 valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF; 1878 if (msglen >= valid_len) { 1879 struct virtchnl_vlan_filter_list_v2 *vfl = 1880 (struct virtchnl_vlan_filter_list_v2 *)msg; 1881 1882 valid_len = virtchnl_struct_size(vfl, filters, 1883 vfl->num_elements); 1884 1885 if (vfl->num_elements == 0) { 1886 err_msg_format = true; 1887 break; 1888 } 1889 } 1890 break; 1891 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 1892 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 1893 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 1894 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 1895 valid_len = sizeof(struct virtchnl_vlan_setting); 1896 break; 1897 case VIRTCHNL_OP_GET_QOS_CAPS: 1898 break; 1899 case VIRTCHNL_OP_CONFIG_QUEUE_BW: 1900 valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF; 1901 if (msglen >= valid_len) { 1902 struct virtchnl_queues_bw_cfg *q_bw = 1903 (struct virtchnl_queues_bw_cfg *)msg; 1904 1905 valid_len = virtchnl_struct_size(q_bw, cfg, 1906 q_bw->num_queues); 1907 if 

/**
 * virtchnl_vc_validate_vf_msg
 * @ver: Virtchnl version info
 * @v_opcode: Opcode for the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg format against struct for each opcode
 */
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
			    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	u32 valid_len = 0;

	/* Validate message length. */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct virtchnl_version_info);
		break;
	case VIRTCHNL_OP_RESET_VF:
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		if (VF_IS_V11(ver))
			valid_len = sizeof(u32);
		break;
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct virtchnl_txq_info);
		break;
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct virtchnl_rxq_info);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_vsi_queue_config_info *vqc =
			    (struct virtchnl_vsi_queue_config_info *)msg;
			valid_len = virtchnl_struct_size(vqc, qpair,
							 vqc->num_queue_pairs);
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_irq_map_info *vimi =
			    (struct virtchnl_irq_map_info *)msg;
			valid_len = virtchnl_struct_size(vimi, vecmap,
							 vimi->num_vectors);
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
	case VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_ether_addr_list *veal =
			    (struct virtchnl_ether_addr_list *)msg;
			valid_len = virtchnl_struct_size(veal, list,
							 veal->num_elements);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_ADD_VLAN:
	case VIRTCHNL_OP_DEL_VLAN:
		valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_vlan_filter_list *vfl =
			    (struct virtchnl_vlan_filter_list *)msg;
			valid_len = virtchnl_struct_size(vfl, vlan_id,
							 vfl->num_elements);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct virtchnl_promisc_info);
		break;
	case VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_RDMA:
		/* These messages are opaque to us and will be validated in
		 * the RDMA client code. We just need to check for nonzero
		 * length. The firmware will enforce max length restrictions.
		 */
		if (msglen)
			valid_len = msglen;
		else
			err_msg_format = true;
		break;
	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
		break;
	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
		valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_rdma_qvlist_info *qv =
				(struct virtchnl_rdma_qvlist_info *)msg;

			valid_len = virtchnl_struct_size(qv, qv_info,
							 qv->num_vectors);
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_rss_key *vrk =
				(struct virtchnl_rss_key *)msg;
			valid_len = virtchnl_struct_size(vrk, key,
							 vrk->key_len);
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_rss_lut *vrl =
				(struct virtchnl_rss_lut *)msg;
			valid_len = virtchnl_struct_size(vrl, lut,
							 vrl->lut_entries);
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
		valid_len = sizeof(struct virtchnl_rss_hfunc);
		break;
	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
		break;
	case VIRTCHNL_OP_SET_RSS_HASHCFG:
		valid_len = sizeof(struct virtchnl_rss_hashcfg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		valid_len = sizeof(struct virtchnl_vf_res_request);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_tc_info *vti =
				(struct virtchnl_tc_info *)msg;
			valid_len = virtchnl_struct_size(vti, list,
							 vti->num_tc);
			if (vti->num_tc == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		valid_len = sizeof(struct virtchnl_filter);
		break;
	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG:
	case VIRTCHNL_OP_DEL_RSS_CFG:
		valid_len = sizeof(struct virtchnl_rss_cfg);
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER:
		valid_len = sizeof(struct virtchnl_fdir_add);
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER:
		valid_len = sizeof(struct virtchnl_fdir_del);
		break;
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
		break;
	case VIRTCHNL_OP_ADD_VLAN_V2:
	case VIRTCHNL_OP_DEL_VLAN_V2:
		valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_vlan_filter_list_v2 *vfl =
			    (struct virtchnl_vlan_filter_list_v2 *)msg;

			valid_len = virtchnl_struct_size(vfl, filters,
							 vfl->num_elements);

			if (vfl->num_elements == 0) {
				err_msg_format = true;
				break;
			}
		}
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		valid_len = sizeof(struct virtchnl_vlan_setting);
		break;
	case VIRTCHNL_OP_GET_QOS_CAPS:
		break;
	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
		valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_queues_bw_cfg *q_bw =
				(struct virtchnl_queues_bw_cfg *)msg;

			valid_len = virtchnl_struct_size(q_bw, cfg,
							 q_bw->num_queues);
			if (q_bw->num_queues == 0) {
				err_msg_format = true;
				break;
			}
		}
		break;
	case VIRTCHNL_OP_CONFIG_QUANTA:
		valid_len = sizeof(struct virtchnl_quanta_cfg);
		if (msglen >= valid_len) {
			struct virtchnl_quanta_cfg *q_quanta =
				(struct virtchnl_quanta_cfg *)msg;

			if (q_quanta->quanta_size == 0 ||
			    q_quanta->queue_select.num_queues == 0) {
				err_msg_format = true;
				break;
			}
		}
		break;
	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
		valid_len = sizeof(struct virtchnl_ptp_caps);
		break;
	case VIRTCHNL_OP_1588_PTP_GET_TIME:
		valid_len = sizeof(struct virtchnl_phc_time);
		break;
	/* These are always errors coming from the VF. */
	case VIRTCHNL_OP_EVENT:
	case VIRTCHNL_OP_UNKNOWN:
	default:
		return VIRTCHNL_STATUS_ERR_PARAM;
	}
	/* few more checks */
	if (err_msg_format || valid_len != msglen)
		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;

	return 0;
}
#endif /* _VIRTCHNL_H_ */