/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2024, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_

/* Description:
 * This header file describes the Virtual Function (VF) - Physical Function
 * (PF) communication protocol used by the drivers for all devices starting
 * from our 40G product line.
 *
 * Admin queue buffer usage:
 * desc->opcode is always aqc_opc_send_msg_to_pf
 * flags, retval, datalen, and data addr are all used normally.
 * The firmware copies the cookie fields when sending messages between the
 * PF and VF, but uses all other fields internally. Due to this limitation,
 * we must send all messages as "indirect", i.e. using an external buffer.
 *
 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
 * have a maximum of sixteen queues for all of its VSIs.
 *
 * The PF is required to return a status code in v_retval for all messages
 * except RESET_VF, which does not require any response. The returned value
 * is of virtchnl_status_code type, defined here.
 *
 * In general, VF driver initialization should roughly follow the order of
 * these opcodes. The VF driver must first validate the API version of the
 * PF driver, then request a reset, then get resources, then configure
 * queues and interrupts. After these operations are complete, the VF
 * driver may start its queues, optionally add MAC and VLAN filters, and
 * process traffic.
 */

/* START GENERIC DEFINES
 * Need to ensure the following enums and defines hold the same meaning and
 * value in current and future projects
 */

#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6

/* These macros are used to generate compilation errors if a structure/union
 * is not exactly the correct length. They give a divide-by-zero error if the
 * structure/union is not of the correct size; otherwise they create an enum
 * that is never used.
 */
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
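/* Illustrative sketch (not part of the protocol): a hypothetical two-field
 * structure and the size check that would guard it. If the expected size
 * passed to the macro were wrong, the division by zero inside it would fail
 * the build instead of silently changing the wire format.
 */
struct virtchnl_example_layout_check {
	u16 first;
	u16 second;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_example_layout_check);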
/* Error Codes
 * Note that many older versions of various iAVF drivers convert the reported
 * status code directly into an iavf_status enumeration. For this reason, it
 * is important that the values of these enumerations line up.
 */
enum virtchnl_status_code {
	VIRTCHNL_STATUS_SUCCESS = 0,
	VIRTCHNL_STATUS_ERR_PARAM = -5,
	VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
	VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};

/* Backward compatibility */
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED

#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7

enum virtchnl_link_speed {
	VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
	VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
	VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
	VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
	VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
	VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
	VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
	VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
	VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};

/* for hsplit_0 field of Rx HMC context */
/* deprecated with AVF 1.0 */
enum virtchnl_rx_hsplit {
	VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
	VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
	VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};

enum virtchnl_bw_limit_type {
	VIRTCHNL_BW_SHAPER = 0,
};
/* END GENERIC DEFINES */
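/* Illustrative sketch (an assumption about driver-side usage, not part of the
 * protocol): translate a legacy virtchnl_link_speed bit into Mbps the way a
 * VF driver might for logging. Anything not covered, including
 * VIRTCHNL_LINK_SPEED_UNKNOWN, returns 0.
 */
static inline u32 virtchnl_example_speed_to_mbps(enum virtchnl_link_speed speed)
{
	switch (speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		return 100;
	case VIRTCHNL_LINK_SPEED_1GB:
		return 1000;
	case VIRTCHNL_LINK_SPEED_2_5GB:
		return 2500;
	case VIRTCHNL_LINK_SPEED_5GB:
		return 5000;
	case VIRTCHNL_LINK_SPEED_10GB:
		return 10000;
	case VIRTCHNL_LINK_SPEED_20GB:
		return 20000;
	case VIRTCHNL_LINK_SPEED_25GB:
		return 25000;
	case VIRTCHNL_LINK_SPEED_40GB:
		return 40000;
	default:
		return 0;
	}
}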
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
 * of the virtchnl_msg structure.
 */
enum virtchnl_ops {
/* The PF sends status change events to VFs using
 * the VIRTCHNL_OP_EVENT opcode.
 * VFs send requests to the PF using the other ops.
 * Use of "advanced opcode" features must be negotiated as part of
 * capabilities exchange and is not considered part of the base mode
 * feature set.
 */
	VIRTCHNL_OP_UNKNOWN = 0,
	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
	VIRTCHNL_OP_RESET_VF = 2,
	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
	VIRTCHNL_OP_ENABLE_QUEUES = 8,
	VIRTCHNL_OP_DISABLE_QUEUES = 9,
	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
	VIRTCHNL_OP_ADD_VLAN = 12,
	VIRTCHNL_OP_DEL_VLAN = 13,
	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
	VIRTCHNL_OP_GET_STATS = 15,
	VIRTCHNL_OP_RSVD = 16,
	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
	/* opcode 19 is reserved */
	/* opcodes 20, 21, and 22 are reserved */
	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
	VIRTCHNL_OP_SET_RSS_HENA = 26,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
	VIRTCHNL_OP_REQUEST_QUEUES = 29,
	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
	/* opcode 34 is reserved */
	/* opcodes 38, 39, 40, 41, 42 and 43 are reserved */
	/* opcode 44 is reserved */
	VIRTCHNL_OP_ADD_RSS_CFG = 45,
	VIRTCHNL_OP_DEL_RSS_CFG = 46,
	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
	VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
	VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
	VIRTCHNL_OP_ADD_VLAN_V2 = 52,
	VIRTCHNL_OP_DEL_VLAN_V2 = 53,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
	VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
	/* opcodes 60 through 65 are reserved */
	VIRTCHNL_OP_GET_QOS_CAPS = 66,
	VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
	/* opcodes 68 through 70 are reserved */
	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
	VIRTCHNL_OP_CONFIG_QUANTA = 113,
	VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
	VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
	/* opcodes 116 through 130 are reserved */
	VIRTCHNL_OP_MAX,
};

static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
{
	switch (v_opcode) {
	case VIRTCHNL_OP_UNKNOWN:
		return "VIRTCHNL_OP_UNKNOWN";
	case VIRTCHNL_OP_VERSION:
		return "VIRTCHNL_OP_VERSION";
	case VIRTCHNL_OP_RESET_VF:
		return "VIRTCHNL_OP_RESET_VF";
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		return "VIRTCHNL_OP_GET_VF_RESOURCES";
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		return "VIRTCHNL_OP_CONFIG_TX_QUEUE";
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		return "VIRTCHNL_OP_CONFIG_RX_QUEUE";
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		return "VIRTCHNL_OP_CONFIG_VSI_QUEUES";
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		return "VIRTCHNL_OP_CONFIG_IRQ_MAP";
	case VIRTCHNL_OP_ENABLE_QUEUES:
		return "VIRTCHNL_OP_ENABLE_QUEUES";
	case VIRTCHNL_OP_DISABLE_QUEUES:
		return "VIRTCHNL_OP_DISABLE_QUEUES";
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		return "VIRTCHNL_OP_ADD_ETH_ADDR";
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		return "VIRTCHNL_OP_DEL_ETH_ADDR";
	case VIRTCHNL_OP_ADD_VLAN:
		return "VIRTCHNL_OP_ADD_VLAN";
	case VIRTCHNL_OP_DEL_VLAN:
		return "VIRTCHNL_OP_DEL_VLAN";
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		return "VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE";
	case VIRTCHNL_OP_GET_STATS:
		return "VIRTCHNL_OP_GET_STATS";
	case VIRTCHNL_OP_RSVD:
		return "VIRTCHNL_OP_RSVD";
	case VIRTCHNL_OP_EVENT:
		return "VIRTCHNL_OP_EVENT";
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		return "VIRTCHNL_OP_CONFIG_RSS_KEY";
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		return "VIRTCHNL_OP_CONFIG_RSS_LUT";
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		return "VIRTCHNL_OP_GET_RSS_HENA_CAPS";
	case VIRTCHNL_OP_SET_RSS_HENA:
		return "VIRTCHNL_OP_SET_RSS_HENA";
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING";
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING";
	case VIRTCHNL_OP_REQUEST_QUEUES:
		return "VIRTCHNL_OP_REQUEST_QUEUES";
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		return "VIRTCHNL_OP_ENABLE_CHANNELS";
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		return "VIRTCHNL_OP_DISABLE_CHANNELS";
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
	case VIRTCHNL_OP_ADD_RSS_CFG:
		return "VIRTCHNL_OP_ADD_RSS_CFG";
	case VIRTCHNL_OP_DEL_RSS_CFG:
		return "VIRTCHNL_OP_DEL_RSS_CFG";
	case VIRTCHNL_OP_ADD_FDIR_FILTER:
		return "VIRTCHNL_OP_ADD_FDIR_FILTER";
	case VIRTCHNL_OP_DEL_FDIR_FILTER:
		return "VIRTCHNL_OP_DEL_FDIR_FILTER";
	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
		return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
		return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS";
	case VIRTCHNL_OP_ADD_VLAN_V2:
		return "VIRTCHNL_OP_ADD_VLAN_V2";
	case VIRTCHNL_OP_DEL_VLAN_V2:
		return "VIRTCHNL_OP_DEL_VLAN_V2";
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2";
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2";
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
		return "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2";
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		return "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2";
	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
		return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
		return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
		return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
		return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
		return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
		return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
		return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
	case VIRTCHNL_OP_MAX:
		return "VIRTCHNL_OP_MAX";
	default:
		return "Unsupported (update virtchnl.h)";
	}
}

static inline const char *virtchnl_stat_str(enum virtchnl_status_code v_status)
{
	switch (v_status) {
	case VIRTCHNL_STATUS_SUCCESS:
		return "VIRTCHNL_STATUS_SUCCESS";
	case VIRTCHNL_STATUS_ERR_PARAM:
		return "VIRTCHNL_STATUS_ERR_PARAM";
	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
		return "VIRTCHNL_STATUS_ERR_NO_MEMORY";
	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
		return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
		return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
"VIRTCHNL_STATUS_ERR_INVALID_VF_ID"; 331 case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: 332 return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR"; 333 case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: 334 return "VIRTCHNL_STATUS_ERR_NOT_SUPPORTED"; 335 default: 336 return "Unknown status code (update virtchnl.h)"; 337 } 338 } 339 340 /* Virtual channel message descriptor. This overlays the admin queue 341 * descriptor. All other data is passed in external buffers. 342 */ 343 344 struct virtchnl_msg { 345 u8 pad[8]; /* AQ flags/opcode/len/retval fields */ 346 347 /* avoid confusion with desc->opcode */ 348 enum virtchnl_ops v_opcode; 349 350 /* ditto for desc->retval */ 351 enum virtchnl_status_code v_retval; 352 u32 vfid; /* used by PF when sending to VF */ 353 }; 354 355 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); 356 357 /* Message descriptions and data structures. */ 358 359 /* VIRTCHNL_OP_VERSION 360 * VF posts its version number to the PF. PF responds with its version number 361 * in the same format, along with a return code. 362 * Reply from PF has its major/minor versions also in param0 and param1. 363 * If there is a major version mismatch, then the VF cannot operate. 364 * If there is a minor version mismatch, then the VF can operate but should 365 * add a warning to the system log. 366 * 367 * This enum element MUST always be specified as == 1, regardless of other 368 * changes in the API. The PF must always respond to this message without 369 * error regardless of version mismatch. 370 */ 371 #define VIRTCHNL_VERSION_MAJOR 1 372 #define VIRTCHNL_VERSION_MINOR 1 373 #define VIRTCHNL_VERSION_MAJOR_2 2 374 #define VIRTCHNL_VERSION_MINOR_0 0 375 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 376 377 struct virtchnl_version_info { 378 u32 major; 379 u32 minor; 380 }; 381 382 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); 383 384 #define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0)) 385 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) 386 #define VF_IS_V20(_ver) (((_ver)->major == 2) && ((_ver)->minor == 0)) 387 388 /* VIRTCHNL_OP_RESET_VF 389 * VF sends this request to PF with no parameters 390 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register 391 * until reset completion is indicated. The admin queue must be reinitialized 392 * after this operation. 393 * 394 * When reset is complete, PF must ensure that all queues in all VSIs associated 395 * with the VF are stopped, all queue configurations in the HMC are set to 0, 396 * and all MAC and VLAN filters (except the default MAC address) on all VSIs 397 * are cleared. 398 */ 399 400 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV 401 * vsi_type should always be 6 for backward compatibility. Add other fields 402 * as needed. 403 */ 404 enum virtchnl_vsi_type { 405 VIRTCHNL_VSI_TYPE_INVALID = 0, 406 VIRTCHNL_VSI_SRIOV = 6, 407 }; 408 409 /* VIRTCHNL_OP_GET_VF_RESOURCES 410 * Version 1.0 VF sends this request to PF with no parameters 411 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities 412 * PF responds with an indirect message containing 413 * virtchnl_vf_resource and one or more 414 * virtchnl_vsi_resource structures. 
/* VIRTCHNL_OP_RESET_VF
 * VF sends this request to PF with no parameters
 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
 * until reset completion is indicated. The admin queue must be reinitialized
 * after this operation.
 *
 * When reset is complete, PF must ensure that all queues in all VSIs associated
 * with the VF are stopped, all queue configurations in the HMC are set to 0,
 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
 * are cleared.
 */

/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
 * vsi_type should always be 6 for backward compatibility. Add other fields
 * as needed.
 */
enum virtchnl_vsi_type {
	VIRTCHNL_VSI_TYPE_INVALID = 0,
	VIRTCHNL_VSI_SRIOV = 6,
};

/* VIRTCHNL_OP_GET_VF_RESOURCES
 * Version 1.0 VF sends this request to PF with no parameters
 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
 * PF responds with an indirect message containing
 * virtchnl_vf_resource and one or more
 * virtchnl_vsi_resource structures.
 */

struct virtchnl_vsi_resource {
	u16 vsi_id;
	u16 num_queue_pairs;

	/* see enum virtchnl_vsi_type */
	s32 vsi_type;
	u16 qset_handle;
	u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);

/* VF capability flags
 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
 */
#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
#define VIRTCHNL_VF_OFFLOAD_IWARP BIT(1)
#define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_IWARP
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
/* BIT(8) is reserved */
#define VIRTCHNL_VF_LARGE_NUM_QPAIRS BIT(9)
#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
#define VIRTCHNL_VF_OFFLOAD_FSUB_PF BIT(14)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 BIT(24)
#define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
/* BIT(26) is reserved */
#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29)
/* BIT(30) is reserved */
/* BIT(31) is reserved */

#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
			       VIRTCHNL_VF_OFFLOAD_VLAN | \
			       VIRTCHNL_VF_OFFLOAD_RSS_PF)

struct virtchnl_vf_resource {
	u16 num_vsis;
	u16 num_queue_pairs;
	u16 max_vectors;
	u16 max_mtu;

	u32 vf_cap_flags;
	u32 rss_key_size;
	u32 rss_lut_size;

	struct virtchnl_vsi_resource vsi_res[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
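/* Illustrative sketch (an assumption, not part of the protocol): after
 * VIRTCHNL_OP_GET_VF_RESOURCES completes, a VF driver would typically test
 * vf_cap_flags before using optional features. The helper name and policy
 * below are hypothetical.
 */
static inline int virtchnl_example_vlan_v2_usable(const struct virtchnl_vf_resource *res)
{
	/* Prefer the richer VLAN_V2 negotiation when the PF granted it;
	 * fall back to the legacy VLAN offload bit otherwise.
	 */
	if (res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
		return 2;
	if (res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		return 1;
	return 0;	/* no VLAN offload negotiated */
}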
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
 * VF sends this message to set up parameters for one TX queue.
 * External data buffer contains one instance of virtchnl_txq_info.
 * PF configures requested queue and returns a status code.
 */

/* Tx queue config info */
struct virtchnl_txq_info {
	u16 vsi_id;
	u16 queue_id;
	u16 ring_len;		/* number of descriptors, multiple of 8 */
	u16 headwb_enabled;	/* deprecated with AVF 1.0 */
	u64 dma_ring_addr;
	u64 dma_headwb_addr;	/* deprecated with AVF 1.0 */
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
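/* Illustrative sketch (an assumption about driver-side usage): filling in a
 * virtchnl_txq_info for VIRTCHNL_OP_CONFIG_TX_QUEUE. The ring length and DMA
 * address are placeholders the caller would supply.
 */
static inline void virtchnl_example_fill_txq(struct virtchnl_txq_info *txq,
					     u16 vsi_id, u16 queue_id,
					     u16 ring_len, u64 dma_addr)
{
	txq->vsi_id = vsi_id;
	txq->queue_id = queue_id;
	txq->ring_len = ring_len;	/* must be a multiple of 8 */
	txq->dma_ring_addr = dma_addr;
	txq->headwb_enabled = 0;	/* deprecated with AVF 1.0 */
	txq->dma_headwb_addr = 0;	/* deprecated with AVF 1.0 */
}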
/* RX descriptor IDs (range from 0 to 63) */
enum virtchnl_rx_desc_ids {
	VIRTCHNL_RXDID_0_16B_BASE = 0,
	VIRTCHNL_RXDID_1_32B_BASE = 1,
	VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
	VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
	VIRTCHNL_RXDID_7_HW_RSVD = 7,
	/* 8 through 15 are reserved */
	VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 = 19,
	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW = 20,
	VIRTCHNL_RXDID_21_COMMS_AUX_TCP = 21,
	/* 22 through 63 are reserved */
};

/* RX descriptor ID bitmasks */
enum virtchnl_rx_desc_id_bitmasks {
	VIRTCHNL_RXDID_0_16B_BASE_M = BIT(VIRTCHNL_RXDID_0_16B_BASE),
	VIRTCHNL_RXDID_1_32B_BASE_M = BIT(VIRTCHNL_RXDID_1_32B_BASE),
	VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
	VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M = BIT(VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL),
	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M = BIT(VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2),
	VIRTCHNL_RXDID_7_HW_RSVD_M = BIT(VIRTCHNL_RXDID_7_HW_RSVD),
	/* 8 through 15 are reserved */
	VIRTCHNL_RXDID_16_COMMS_GENERIC_M = BIT(VIRTCHNL_RXDID_16_COMMS_GENERIC),
	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M = BIT(VIRTCHNL_RXDID_17_COMMS_AUX_VLAN),
	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M = BIT(VIRTCHNL_RXDID_18_COMMS_AUX_IPV4),
	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M = BIT(VIRTCHNL_RXDID_19_COMMS_AUX_IPV6),
	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M = BIT(VIRTCHNL_RXDID_20_COMMS_AUX_FLOW),
	VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M = BIT(VIRTCHNL_RXDID_21_COMMS_AUX_TCP),
	/* 22 through 63 are reserved */
};

/* VIRTCHNL_OP_CONFIG_RX_QUEUE
 * VF sends this message to set up parameters for one RX queue.
 * External data buffer contains one instance of virtchnl_rxq_info.
 * PF configures requested queue and returns a status code. The
 * crc_disable flag disables CRC stripping on the VF. Setting
 * the crc_disable flag to 1 will disable CRC stripping for each
 * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
 * offload must have been set prior to sending this info or the PF
 * will ignore the request. This flag should be set the same for
 * all of the queues for a VF.
 */

/* Rx queue config info */
struct virtchnl_rxq_info {
	u16 vsi_id;
	u16 queue_id;
	u32 ring_len;		/* number of descriptors, multiple of 32 */
	u16 hdr_size;
	u16 splithdr_enabled;	/* deprecated with AVF 1.0 */
	u32 databuffer_size;
	u32 max_pkt_size;
	u8 crc_disable;
	u8 pad1[3];
	u64 dma_ring_addr;

	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
	s32 rx_split_pos;
	u32 pad2;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);

/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
 * VF sends this message to set parameters for active TX and RX queues
 * associated with the specified VSI.
 * PF configures queues and returns status.
 * If the number of queues specified is greater than the number of queues
 * associated with the VSI, an error is returned and no queues are configured.
 * NOTE: The VF is not required to configure all queues in a single request.
 * It may send multiple messages. PF drivers must correctly handle all VF
 * requests.
 */
struct virtchnl_queue_pair_info {
	/* NOTE: vsi_id and queue_id should be identical for both queues. */
	struct virtchnl_txq_info txq;
	struct virtchnl_rxq_info rxq;
};

VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);

struct virtchnl_vsi_queue_config_info {
	u16 vsi_id;
	u16 num_queue_pairs;
	u32 pad;
	struct virtchnl_queue_pair_info qpair[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);

/* VIRTCHNL_OP_REQUEST_QUEUES
 * VF sends this message to request the PF to allocate additional queues to
 * this VF. Each VF gets a guaranteed number of queues on init but asking for
 * additional queues must be negotiated. This is a best effort request as it
 * is possible the PF does not have enough queues left to support the request.
 * If the PF cannot support the number requested it will respond with the
 * maximum number it is able to support. If the request is successful, PF will
 * then reset the VF to institute required changes.
 */

/* VF resource request */
struct virtchnl_vf_res_request {
	u16 num_queue_pairs;
};

/* VIRTCHNL_OP_CONFIG_IRQ_MAP
 * VF uses this message to map vectors to queues.
 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
 * are to be associated with the specified vector.
 * The "other" causes are always mapped to vector 0. The VF may not request
 * that vector 0 be used for traffic.
 * PF configures interrupt mapping and returns status.
 * NOTE: due to hardware requirements, all active queues (both TX and RX)
 * should be mapped to interrupts, even if the driver intends to operate
 * only in polling mode. In this case the interrupt may be disabled, but
 * the ITR timer will still run to trigger writebacks.
 */
struct virtchnl_vector_map {
	u16 vsi_id;
	u16 vector_id;
	u16 rxq_map;
	u16 txq_map;
	u16 rxitr_idx;
	u16 txitr_idx;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);

struct virtchnl_irq_map_info {
	u16 num_vectors;
	struct virtchnl_vector_map vecmap[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
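/* Illustrative sketch (an assumption about driver-side usage): mapping one
 * traffic queue pair to one interrupt vector for VIRTCHNL_OP_CONFIG_IRQ_MAP.
 * Vector 0 is left for "other" causes, so traffic vectors start at 1.
 */
static inline void virtchnl_example_map_queue_to_vector(struct virtchnl_vector_map *map,
							u16 vsi_id, u16 queue_id,
							u16 vector_id)
{
	map->vsi_id = vsi_id;
	map->vector_id = vector_id;	/* must not be 0 for traffic */
	map->rxq_map = BIT(queue_id);	/* bitmap of RX queues on this vector */
	map->txq_map = BIT(queue_id);	/* bitmap of TX queues on this vector */
	map->rxitr_idx = 0;
	map->txitr_idx = 0;
}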
/* VIRTCHNL_OP_ENABLE_QUEUES
 * VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
 * The queues fields are bitmaps indicating which queues to act upon.
 * (Currently, we only support 16 queues per VF, but we make the field
 * u32 to allow for expansion.)
 * PF performs requested action and returns status.
 * NOTE: The VF is not required to enable/disable all queues in a single
 * request. It may send multiple messages.
 * PF drivers must correctly handle all VF requests.
 */
struct virtchnl_queue_select {
	u16 vsi_id;
	u16 pad;
	u32 rx_queues;
	u32 tx_queues;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
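/* Illustrative sketch (an assumption about driver-side usage): building the
 * queue bitmaps for VIRTCHNL_OP_ENABLE_QUEUES when the first num_queues
 * queue pairs of a VSI should be enabled.
 */
static inline void virtchnl_example_select_queues(struct virtchnl_queue_select *sel,
						  u16 vsi_id, u32 num_queues)
{
	u32 i;

	sel->vsi_id = vsi_id;
	sel->pad = 0;
	sel->rx_queues = 0;
	sel->tx_queues = 0;
	for (i = 0; i < num_queues && i < 32; i++) {
		sel->rx_queues |= BIT(i);
		sel->tx_queues |= BIT(i);
	}
}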
/* VIRTCHNL_OP_GET_MAX_RSS_QREGION
 *
 * if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
 * then this op must be supported.
 *
 * VF sends this message in order to query the max RSS queue region
 * size supported by PF, when VIRTCHNL_VF_LARGE_NUM_QPAIRS is enabled.
 * This information should be used when configuring the RSS LUT and/or
 * configuring queue region based filters.
 *
 * The maximum RSS queue region is 2^qregion_width. So, a qregion_width
 * of 6 would inform the VF that the PF supports a maximum RSS queue region
 * of 64.
 *
 * A queue region represents a range of queues that can be used to configure
 * an RSS LUT. For example, if a VF is given 64 queues, but only a max queue
 * region size of 16 (i.e. 2^qregion_width = 16) then it will only be able
 * to configure the RSS LUT with queue indices from 0 to 15. However, other
 * filters can be used to direct packets to queues >15 via specifying a queue
 * base/offset and queue region width.
 */
struct virtchnl_max_rss_qregion {
	u16 vport_id;
	u16 qregion_width;
	u8 pad[4];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_max_rss_qregion);

/* VIRTCHNL_OP_ADD_ETH_ADDR
 * VF sends this message in order to add one or more unicast or multicast
 * address filters for the specified VSI.
 * PF adds the filters and returns status.
 */

/* VIRTCHNL_OP_DEL_ETH_ADDR
 * VF sends this message in order to remove one or more unicast or multicast
 * filters for the specified VSI.
 * PF removes the filters and returns status.
 */

/* VIRTCHNL_ETHER_ADDR_LEGACY
 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
 * bytes. Moving forward all VF drivers should not set type to
 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
 * behavior. The control plane function (i.e. PF) can use a best effort method
 * of tracking the primary/device unicast in this case, but there is no
 * guarantee and functionality depends on the implementation of the PF.
 */

/* VIRTCHNL_ETHER_ADDR_PRIMARY
 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
 * function (i.e. PF) to accurately track and use this MAC address for
 * displaying on the host and for VM/function reset.
 */

/* VIRTCHNL_ETHER_ADDR_EXTRA
 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
 * unicast and/or multicast filters that are being added/deleted via
 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
 */
struct virtchnl_ether_addr {
	u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	u8 type;
#define VIRTCHNL_ETHER_ADDR_LEGACY 0
#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
#define VIRTCHNL_ETHER_ADDR_EXTRA 2
#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
	u8 pad;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);

struct virtchnl_ether_addr_list {
	u16 vsi_id;
	u16 num_elements;
	struct virtchnl_ether_addr list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);

/* VIRTCHNL_OP_ADD_VLAN
 * VF sends this message to add one or more VLAN tag filters for receives.
 * PF adds the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

/* VIRTCHNL_OP_DEL_VLAN
 * VF sends this message to remove one or more VLAN tag filters for receives.
 * PF removes the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

struct virtchnl_vlan_filter_list {
	u16 vsi_id;
	u16 num_elements;
	u16 vlan_id[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
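/* Illustrative sketch (an assumption about driver-side usage): describing the
 * VF's primary MAC address for VIRTCHNL_OP_ADD_ETH_ADDR. A real driver would
 * place this entry in the indirect buffer along with the list header.
 */
static inline void virtchnl_example_fill_primary_mac(struct virtchnl_ether_addr *entry,
						     const u8 mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS])
{
	int i;

	for (i = 0; i < VIRTCHNL_ETH_LENGTH_OF_ADDRESS; i++)
		entry->addr[i] = mac[i];
	entry->type = VIRTCHNL_ETHER_ADDR_PRIMARY;
	entry->pad = 0;
}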
/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
 * structures and opcodes.
 *
 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
 *
 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
 *
 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
 * by the PF concurrently. For example, if the PF can support
 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
 * would OR the following bits:
 *
 *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *	VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
 * and 0x88A8 VLAN ethertypes.
 *
 * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
 * supported by the PF concurrently. For example if the PF can support
 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
 * offload it would OR the following bits:
 *
 *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
 *
 * The VF would interpret this as VLAN stripping can be supported on either
 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
 * the previously set value.
 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
 *
 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
 *
 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
 * VLAN filtering if the underlying PF supports it.
 *
 * VIRTCHNL_VLAN_TOGGLE_ALLOWED - This field is used to say whether a
 * certain VLAN capability can be toggled. For example if the underlying PF/CP
 * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
 * set this bit along with the supported ethertypes.
 */
enum virtchnl_vlan_support {
	VIRTCHNL_VLAN_UNSUPPORTED = 0,
	VIRTCHNL_VLAN_ETHERTYPE_8100 = 0x00000001,
	VIRTCHNL_VLAN_ETHERTYPE_88A8 = 0x00000002,
	VIRTCHNL_VLAN_ETHERTYPE_9100 = 0x00000004,
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = 0x00000100,
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = 0x00000200,
	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = 0x00000400,
	VIRTCHNL_VLAN_PRIO = 0x01000000,
	VIRTCHNL_VLAN_FILTER_MASK = 0x10000000,
	VIRTCHNL_VLAN_ETHERTYPE_AND = 0x20000000,
	VIRTCHNL_VLAN_ETHERTYPE_XOR = 0x40000000,
	VIRTCHNL_VLAN_TOGGLE = 0x80000000
};
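/* Illustrative sketch (an assumption, not part of the protocol): testing one
 * of the capability fields described above, e.g. whether 0x8100 support is
 * advertised together with permission for the VF to toggle it.
 */
static inline int virtchnl_example_can_toggle_8100(u32 support_field)
{
	return (support_field & VIRTCHNL_VLAN_TOGGLE) &&
	       (support_field & VIRTCHNL_VLAN_ETHERTYPE_8100);
}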
/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
 * for filtering, insertion, and stripping capabilities.
 *
 * If only outer capabilities are supported (for filtering, insertion, and/or
 * stripping) then this refers to the outer most or single VLAN from the VF's
 * perspective.
 *
 * If only inner capabilities are supported (for filtering, insertion, and/or
 * stripping) then this refers to the outer most or single VLAN from the VF's
 * perspective. Functionally this is the same as if only outer capabilities are
 * supported. The VF driver is just forced to use the inner fields when
 * adding/deleting filters and enabling/disabling offloads (if supported).
 *
 * If both outer and inner capabilities are supported (for filtering, insertion,
 * and/or stripping) then outer refers to the outer most or single VLAN and
 * inner refers to the second VLAN, if it exists, in the packet.
 *
 * There is no support for tunneled VLAN offloads, so outer or inner are never
 * referring to a tunneled packet from the VF's perspective.
 */
struct virtchnl_vlan_supported_caps {
	u32 outer;
	u32 inner;
};

/* The PF populates these fields based on the supported VLAN filtering. If a
 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
 * the unsupported fields.
 *
 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
 * VIRTCHNL_VLAN_TOGGLE bit is set.
 *
 * The ethertype(s) specified in the ethertype_init field are the ethertypes
 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
 * most VLAN from the VF's perspective. If both inner and outer filtering are
 * allowed then ethertype_init only refers to the outer most VLAN as the only
 * VLAN ethertype supported for inner VLAN filtering is
 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
 * when both inner and outer filtering are allowed.
 *
 * The max_filters field tells the VF how many VLAN filters it's allowed to have
 * at any one time. If it exceeds this amount and tries to add another filter,
 * then the request will be rejected by the PF. To prevent failures, the VF
 * should keep track of how many VLAN filters it has added and not attempt to
 * add more than max_filters.
 */
struct virtchnl_vlan_filtering_caps {
	struct virtchnl_vlan_supported_caps filtering_support;
	u32 ethertype_init;
	u16 max_filters;
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);

/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
 * if the PF supports a different ethertype for stripping and insertion.
 *
 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
 * for stripping affect the ethertype(s) specified for insertion and vice versa
 * as well. If the VF tries to configure VLAN stripping via
 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
 * that will be the ethertype for both stripping and insertion.
 *
 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
 * stripping do not affect the ethertype(s) specified for insertion and vice
 * versa.
 */
enum virtchnl_vlan_ethertype_match {
	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
};

/* The PF populates these fields based on the supported VLAN offloads. If a
 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
 *
 * Also, a VF is only allowed to toggle its VLAN offload setting if the
 * VIRTCHNL_VLAN_TOGGLE_ALLOWED bit is set.
 *
 * The VF driver needs to be aware of how the tags are stripped by hardware and
 * inserted by the VF driver based on the level of offload support. The PF will
 * populate these fields based on where the VLAN tags are expected to be
 * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
 * interpret these fields. See the definition of the
 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
 * enumeration.
 */
struct virtchnl_vlan_offload_caps {
	struct virtchnl_vlan_supported_caps stripping_support;
	struct virtchnl_vlan_supported_caps insertion_support;
	u32 ethertype_init;
	u8 ethertype_match;
	u8 pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);

/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
 * VF sends this message to determine its VLAN capabilities.
 *
 * PF will mark which capabilities it supports based on hardware support and
 * current configuration. For example, if a port VLAN is configured the PF will
 * not allow outer VLAN filtering, stripping, or insertion to be configured so
 * it will block these features from the VF.
 *
 * The VF will need to cross reference its capabilities with the PF's
 * capabilities in the response message from the PF to determine the VLAN
 * support.
 */
struct virtchnl_vlan_caps {
	struct virtchnl_vlan_filtering_caps filtering;
	struct virtchnl_vlan_offload_caps offloads;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
struct virtchnl_vlan {
	u16 tci;	/* tci[15:13] = PCP and tci[11:0] = VID */
	u16 tci_mask;	/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
			 * filtering caps
			 */
	u16 tpid;	/* 0x8100, 0x88a8, etc. and only type(s) set in
			 * filtering caps. Note that tpid here does not refer to
			 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
			 * actual 2-byte VLAN TPID
			 */
	u8 pad[2];
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);

struct virtchnl_vlan_filter {
	struct virtchnl_vlan inner;
	struct virtchnl_vlan outer;
	u8 pad[16];
};

VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);

/* VIRTCHNL_OP_ADD_VLAN_V2
 * VIRTCHNL_OP_DEL_VLAN_V2
 *
 * VF sends these messages to add/del one or more VLAN tag filters for Rx
 * traffic.
 *
 * The PF attempts to add the filters and returns status.
 *
 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
 */
struct virtchnl_vlan_filter_list_v2 {
	u16 vport_id;
	u16 num_elements;
	u8 pad[4];
	struct virtchnl_vlan_filter filters[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
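/* Illustrative sketch (an assumption about driver-side usage): describing a
 * single-tagged filter for VIRTCHNL_OP_ADD_VLAN_V2 when only inner
 * capabilities were negotiated, so the VID goes in the inner member.
 */
static inline void virtchnl_example_fill_vlan_v2(struct virtchnl_vlan_filter *filter,
						 u16 vid)
{
	filter->inner.tci = vid;	/* tci[11:0] = VID, PCP left at 0 */
	filter->inner.tci_mask = 0;	/* unused without VIRTCHNL_VLAN_FILTER_MASK */
	filter->inner.tpid = 0x8100;
	filter->outer.tci = 0;		/* no outer tag in this example */
	filter->outer.tci_mask = 0;
	filter->outer.tpid = 0;
}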
/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
 *
 * VF sends this message to enable or disable VLAN stripping or insertion. It
 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
 * allowed and whether or not it's allowed to enable/disable the specific
 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
 * messages are allowed.
 *
 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
 * case means the outer most or single VLAN from the VF's perspective. This is
 * because no outer offloads are supported. See the comments above the
 * virtchnl_vlan_supported_caps structure for more details.
 *
 * virtchnl_vlan_caps.offloads.stripping_support.inner =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100;
 *
 * virtchnl_vlan_caps.offloads.insertion_support.inner =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100;
 *
 * In order to enable inner (again note that in this case inner is the outer
 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
 *
 * virtchnl_vlan_setting.inner_ethertype_setting =
 *			VIRTCHNL_VLAN_ETHERTYPE_8100;
 *
 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
 * initialization.
 *
 * The reason that VLAN TPID(s) are not being used for the
 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
 * possible a device could support VLAN insertion and/or stripping offload on
 * multiple ethertypes concurrently, so this method allows a VF to request
 * multiple ethertypes in one message using the virtchnl_vlan_support
 * enumeration.
 *
 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
 * VLAN insertion and stripping simultaneously. The
 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
 * populated based on what the PF can support.
 *
 * virtchnl_vlan_caps.offloads.stripping_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * virtchnl_vlan_caps.offloads.insertion_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
 * would populate the virtchnl_vlan_setting structure in the following manner
 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
 *
 * virtchnl_vlan_setting.outer_ethertype_setting =
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
 *
 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
 * initialization.
 *
 * There is also the case where a PF and the underlying hardware can support
 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
 * offloads. The ethertypes must match for stripping and insertion.
 *
 * virtchnl_vlan_caps.offloads.stripping_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
 *
 * virtchnl_vlan_caps.offloads.insertion_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
 *
 * virtchnl_vlan_caps.offloads.ethertype_match =
 *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
 *
 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
 * populate the virtchnl_vlan_setting structure in the following manner and send
 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
 *
 * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
 *
 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
 * initialization.
 *
 * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2
 * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2
 *
 * VF sends this message to enable or disable VLAN filtering. It also needs to
 * specify an ethertype. The VF knows which VLAN ethertypes are allowed and
 * whether or not it's allowed to enable/disable filtering via the
 * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
 * parse the virtchnl_vlan_caps.filtering fields to determine which, if any,
 * filtering messages are allowed.
 *
 * For example, if the PF populates the virtchnl_vlan_caps.filtering in the
 * following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8
 * outer VLAN filtering together. Note that the VIRTCHNL_VLAN_ETHERTYPE_AND
 * means that all filtering ethertypes will be enabled and disabled together
 * regardless of the request from the VF.
 * This means that the underlying hardware only supports VLAN filtering for
 * all of the specified VLAN ethertypes or none of them.
 *
 * virtchnl_vlan_caps.filtering.filtering_support.outer =
 *			VIRTCHNL_VLAN_TOGGLE |
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
 *			VIRTCHNL_VLAN_ETHERTYPE_9100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_AND;
 *
 * In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100
 * VLANs aren't supported by the VF driver), the VF would populate the
 * virtchnl_vlan_setting structure in the following manner and send the
 * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. The same message format would be used
 * to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the
 * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.
 *
 * virtchnl_vlan_setting.outer_ethertype_setting =
 *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
 *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
 *
 */
struct virtchnl_vlan_setting {
	u32 outer_ethertype_setting;
	u32 inner_ethertype_setting;
	u16 vport_id;
	u8 pad[6];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);

/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
 * VF sends VSI id and flags.
 * PF returns status code in retval.
 * Note: we assume that broadcast accept mode is always enabled.
 */
struct virtchnl_promisc_info {
	u16 vsi_id;
	u16 flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);

#define FLAG_VF_UNICAST_PROMISC 0x00000001
#define FLAG_VF_MULTICAST_PROMISC 0x00000002

/* VIRTCHNL_OP_GET_STATS
 * VF sends this message to request stats for the selected VSI. VF uses
 * the virtchnl_queue_select struct to specify the VSI. The queue_id
 * field is ignored by the PF.
 *
 * PF replies with struct virtchnl_eth_stats in an external buffer.
 */

struct virtchnl_eth_stats {
	u64 rx_bytes;			/* received bytes */
	u64 rx_unicast;			/* received unicast pkts */
	u64 rx_multicast;		/* received multicast pkts */
	u64 rx_broadcast;		/* received broadcast pkts */
	u64 rx_discards;
	u64 rx_unknown_protocol;
	u64 tx_bytes;			/* transmitted bytes */
	u64 tx_unicast;			/* transmitted unicast pkts */
	u64 tx_multicast;		/* transmitted multicast pkts */
	u64 tx_broadcast;		/* transmitted broadcast pkts */
	u64 tx_discards;
	u64 tx_errors;
};

/* VIRTCHNL_OP_CONFIG_RSS_KEY
 * VIRTCHNL_OP_CONFIG_RSS_LUT
 * VF sends these messages to configure RSS. Only supported if both PF
 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
 * configuration negotiation. If this is the case, then the RSS fields in
 * the VF resource struct are valid.
 * Both the key and LUT are initialized to 0 by the PF, meaning that
 * RSS is effectively disabled until set up by the VF.
 */
struct virtchnl_rss_key {
	u16 vsi_id;
	u16 key_len;
	u8 key[1];	/* RSS hash key, packed bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);

struct virtchnl_rss_lut {
	u16 vsi_id;
	u16 lut_entries;
	u8 lut[1];	/* RSS lookup table */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
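/* Illustrative sketch (an assumption about driver-side usage): spreading the
 * first num_queues queues evenly across an RSS LUT buffer that the caller
 * allocated according to rss_lut_size from the VF resource message.
 */
static inline void virtchnl_example_fill_rss_lut(struct virtchnl_rss_lut *lut,
						 u16 vsi_id, u16 lut_entries,
						 u16 num_queues)
{
	u16 i;

	lut->vsi_id = vsi_id;
	lut->lut_entries = lut_entries;
	for (i = 0; i < lut_entries; i++)
		lut->lut[i] = (u8)(i % num_queues);	/* round-robin over queues */
}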
/* enum virtchnl_hash_filter
 *
 * Bits defining the hash filters in the hena field of the virtchnl_rss_hena
 * structure. Each bit indicates a specific hash filter for RSS.
 *
 * Note that not all bits are supported on all hardware. The VF should use
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS to determine which bits the PF is capable of
 * before using VIRTCHNL_OP_SET_RSS_HENA to enable specific filters.
 */
enum virtchnl_hash_filter {
	/* Bits 0 through 28 are reserved for future use */
	/* Bits 29, 30, and 32 are not supported on XL710 and X710 */
	VIRTCHNL_HASH_FILTER_UNICAST_IPV4_UDP = 29,
	VIRTCHNL_HASH_FILTER_MULTICAST_IPV4_UDP = 30,
	VIRTCHNL_HASH_FILTER_IPV4_UDP = 31,
	VIRTCHNL_HASH_FILTER_IPV4_TCP_SYN_NO_ACK = 32,
	VIRTCHNL_HASH_FILTER_IPV4_TCP = 33,
	VIRTCHNL_HASH_FILTER_IPV4_SCTP = 34,
	VIRTCHNL_HASH_FILTER_IPV4_OTHER = 35,
	VIRTCHNL_HASH_FILTER_FRAG_IPV4 = 36,
	/* Bits 37 and 38 are reserved for future use */
	/* Bits 39, 40, and 42 are not supported on XL710 and X710 */
	VIRTCHNL_HASH_FILTER_UNICAST_IPV6_UDP = 39,
	VIRTCHNL_HASH_FILTER_MULTICAST_IPV6_UDP = 40,
	VIRTCHNL_HASH_FILTER_IPV6_UDP = 41,
	VIRTCHNL_HASH_FILTER_IPV6_TCP_SYN_NO_ACK = 42,
	VIRTCHNL_HASH_FILTER_IPV6_TCP = 43,
	VIRTCHNL_HASH_FILTER_IPV6_SCTP = 44,
	VIRTCHNL_HASH_FILTER_IPV6_OTHER = 45,
	VIRTCHNL_HASH_FILTER_FRAG_IPV6 = 46,
	/* Bit 47 is reserved for future use */
	VIRTCHNL_HASH_FILTER_FCOE_OX = 48,
	VIRTCHNL_HASH_FILTER_FCOE_RX = 49,
	VIRTCHNL_HASH_FILTER_FCOE_OTHER = 50,
	/* Bits 51 through 62 are reserved for future use */
	VIRTCHNL_HASH_FILTER_L2_PAYLOAD = 63,
};

#define VIRTCHNL_HASH_FILTER_INVALID (0)

/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
 * VIRTCHNL_OP_SET_RSS_HENA
 * VF sends these messages to get and set the hash filter enable bits for RSS.
 * By default, the PF sets these to all possible traffic types that the
 * hardware supports. The VF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 */
struct virtchnl_rss_hena {
	/* see enum virtchnl_hash_filter */
	u64 hena;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
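/* Illustrative sketch (an assumption about driver-side usage): building a
 * hena value for VIRTCHNL_OP_SET_RSS_HENA that hashes TCP and UDP over IPv4
 * and IPv6 only. The enum values are bit positions, so they are shifted into
 * a 64-bit mask here.
 */
static inline u64 virtchnl_example_tcp_udp_hena(void)
{
	return ((u64)1 << VIRTCHNL_HASH_FILTER_IPV4_TCP) |
	       ((u64)1 << VIRTCHNL_HASH_FILTER_IPV4_UDP) |
	       ((u64)1 << VIRTCHNL_HASH_FILTER_IPV6_TCP) |
	       ((u64)1 << VIRTCHNL_HASH_FILTER_IPV6_UDP);
}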
/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
	VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
};

/* This is used by the PF driver to enforce how many channels can be supported.
 * When the ADQ_V2 capability is negotiated it allows 16 channels; otherwise
 * the PF driver allows a maximum of only 4 channels.
 */
#define VIRTCHNL_MAX_ADQ_CHANNELS 4
#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16

/* VIRTCHNL_OP_ENABLE_CHANNELS
 * VIRTCHNL_OP_DISABLE_CHANNELS
 * VF sends these messages to enable or disable channels based on
 * the user specified queue count and queue offset for each traffic class.
 * This struct encompasses all the information that the PF needs from
 * the VF to create a channel.
 */
struct virtchnl_channel_info {
	u16 count;	/* number of queues in a channel */
	u16 offset;	/* queues in a channel start from 'offset' */
	u32 pad;
	u64 max_tx_rate;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);

struct virtchnl_tc_info {
	u32 num_tc;
	u32 pad;
	struct virtchnl_channel_info list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);

/* VIRTCHNL_ADD_CLOUD_FILTER
 * VIRTCHNL_DEL_CLOUD_FILTER
 * VF sends these messages to add or delete a cloud filter based on the
 * user specified match and action filters. These structures encompass
 * all the information that the PF needs from the VF to add/delete a
 * cloud filter.
 */

struct virtchnl_l4_spec {
	u8 src_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	u8 dst_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	/* vlan_prio is part of this 16-bit field even from the OS perspective:
	 * bits 11:0 are the actual vlan_id and bits 14:12 are the vlan_prio.
	 * In the future, if vlan_prio offload is decided, pass that
	 * information as part of the "vlan_id" field, bits 14..12.
	 */
	__be16 vlan_id;
	__be16 pad;	/* reserved for future use */
	__be32 src_ip[4];
	__be32 dst_ip[4];
	__be16 src_port;
	__be16 dst_port;
};

VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);

union virtchnl_flow_spec {
	struct virtchnl_l4_spec tcp_spec;
	u8 buffer[128];	/* reserved for future use */
};

VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);

enum virtchnl_action {
	/* action types */
	VIRTCHNL_ACTION_DROP = 0,
	VIRTCHNL_ACTION_TC_REDIRECT,
	VIRTCHNL_ACTION_PASSTHRU,
	VIRTCHNL_ACTION_QUEUE,
	VIRTCHNL_ACTION_Q_REGION,
	VIRTCHNL_ACTION_MARK,
	VIRTCHNL_ACTION_COUNT,
};

enum virtchnl_flow_type {
	/* flow types */
	VIRTCHNL_TCP_V4_FLOW = 0,
	VIRTCHNL_TCP_V6_FLOW,
	VIRTCHNL_UDP_V4_FLOW,
	VIRTCHNL_UDP_V6_FLOW,
};

struct virtchnl_filter {
	union virtchnl_flow_spec data;
	union virtchnl_flow_spec mask;

	/* see enum virtchnl_flow_type */
	s32 flow_type;

	/* see enum virtchnl_action */
	s32 action;
	u32 action_meta;
	u8 field_flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
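/* Illustrative sketch (an assumption about driver-side usage): a cloud filter
 * for VIRTCHNL_OP_ADD_CLOUD_FILTER that steers a TCP/IPv4 flow to a specific
 * queue. The match fields in data/mask are network byte order and are left
 * for the caller to fill; only the action portion is shown here.
 */
static inline void virtchnl_example_steer_to_queue(struct virtchnl_filter *filter,
						   u32 queue_id)
{
	filter->flow_type = VIRTCHNL_TCP_V4_FLOW;
	filter->action = VIRTCHNL_ACTION_QUEUE;
	filter->action_meta = queue_id;	/* destination queue index */
	filter->field_flags = 0;
}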
struct virtchnl_shaper_bw {
	/* Unit is Kbps */
	u32 committed;
	u32 peak;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);

/* VIRTCHNL_OP_EVENT
 * PF sends this message to inform the VF driver of events that may affect it.
 * No direct response is expected from the VF, though it may generate other
 * messages in response to this one.
 */
enum virtchnl_event_codes {
	VIRTCHNL_EVENT_UNKNOWN = 0,
	VIRTCHNL_EVENT_LINK_CHANGE,
	VIRTCHNL_EVENT_RESET_IMPENDING,
	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};

#define PF_EVENT_SEVERITY_INFO 0
#define PF_EVENT_SEVERITY_ATTENTION 1
#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255

struct virtchnl_pf_event {
	/* see enum virtchnl_event_codes */
	s32 event;
	union {
		/* If the PF driver does not support the new speed reporting
		 * capabilities then use link_event, else use link_event_adv to
		 * get the speed and link information. The ability to understand
		 * new speeds is indicated by setting the capability flag
		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the vf_cap_flags parameter
		 * of the virtchnl_vf_resource struct and can be used to
		 * determine which link event struct to use below.
		 */
		struct {
			enum virtchnl_link_speed link_speed;
			bool link_status;
			u8 pad[3];
		} link_event;
		struct {
			/* link_speed provided in Mbps */
			u32 link_speed;
			u8 link_status;
			u8 pad[3];
		} link_event_adv;
	} event_data;

	s32 severity;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);

/* VF reset states - these are written into the RSTAT register:
 * VFGEN_RSTAT on the VF
 * When the PF initiates a reset, it writes 0
 * When the reset is complete, it writes 1
 * When the PF detects that the VF has recovered, it writes 2
 * VF checks this register periodically to determine if a reset has occurred,
 * then polls it to know when the reset is complete.
 * If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return 0xDEADBEEF, which, when masked
 * will result in 3.
 */
enum virtchnl_vfr_states {
	VIRTCHNL_VFR_INPROGRESS = 0,
	VIRTCHNL_VFR_COMPLETED,
	VIRTCHNL_VFR_VFACTIVE,
};

#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK 16
#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) \
	(proto_hdr_type << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)

/* The VF uses these macros to configure each protocol header, specifying
 * which protocol headers and protocol header fields to use based on
 * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
 * @param hdr: a struct of virtchnl_proto_hdr
 * @param hdr_type: ETH/IPV4/TCP, etc
 * @param field: SRC/DST/TEID/SPI, etc
 */
#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)

#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))

#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
	(((hdr)->type) >> PROTO_HDR_SHIFT)
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
	((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
	(VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
	 VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
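/* Illustrative sketch (an assumption, not part of the protocol): how a field
 * enumerator from virtchnl_proto_hdr_field (defined below) packs together the
 * protocol header type and the per-header bit index that the macros above
 * manipulate.
 */
static inline void virtchnl_example_decode_field(s32 field_enum_val,
						 s32 *hdr_type, u32 *bit_index)
{
	*hdr_type = field_enum_val >> PROTO_HDR_SHIFT;	/* enum virtchnl_proto_hdr_type value */
	*bit_index = (u32)field_enum_val & PROTO_HDR_FIELD_MASK;	/* bit within field_selector */
}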
Each 1489 * logical group of protocol headers encapsulates or is encapsulated using/by 1490 * tunneling or encapsulation protocols for network virtualization. 1491 */ 1492 enum virtchnl_proto_hdr_type { 1493 VIRTCHNL_PROTO_HDR_NONE, 1494 VIRTCHNL_PROTO_HDR_ETH, 1495 VIRTCHNL_PROTO_HDR_S_VLAN, 1496 VIRTCHNL_PROTO_HDR_C_VLAN, 1497 VIRTCHNL_PROTO_HDR_IPV4, 1498 VIRTCHNL_PROTO_HDR_IPV6, 1499 VIRTCHNL_PROTO_HDR_TCP, 1500 VIRTCHNL_PROTO_HDR_UDP, 1501 VIRTCHNL_PROTO_HDR_SCTP, 1502 VIRTCHNL_PROTO_HDR_GTPU_IP, 1503 VIRTCHNL_PROTO_HDR_GTPU_EH, 1504 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN, 1505 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP, 1506 VIRTCHNL_PROTO_HDR_PPPOE, 1507 VIRTCHNL_PROTO_HDR_L2TPV3, 1508 VIRTCHNL_PROTO_HDR_ESP, 1509 VIRTCHNL_PROTO_HDR_AH, 1510 VIRTCHNL_PROTO_HDR_PFCP, 1511 VIRTCHNL_PROTO_HDR_GTPC, 1512 VIRTCHNL_PROTO_HDR_ECPRI, 1513 VIRTCHNL_PROTO_HDR_L2TPV2, 1514 VIRTCHNL_PROTO_HDR_PPP, 1515 /* IPv4 and IPv6 Fragment header types are only associated to 1516 * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively, 1517 * cannot be used independently. 1518 */ 1519 VIRTCHNL_PROTO_HDR_IPV4_FRAG, 1520 VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, 1521 VIRTCHNL_PROTO_HDR_GRE, 1522 }; 1523 1524 /* Protocol header field within a protocol header. */ 1525 enum virtchnl_proto_hdr_field { 1526 /* ETHER */ 1527 VIRTCHNL_PROTO_HDR_ETH_SRC = 1528 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH), 1529 VIRTCHNL_PROTO_HDR_ETH_DST, 1530 VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, 1531 /* S-VLAN */ 1532 VIRTCHNL_PROTO_HDR_S_VLAN_ID = 1533 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN), 1534 /* C-VLAN */ 1535 VIRTCHNL_PROTO_HDR_C_VLAN_ID = 1536 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN), 1537 /* IPV4 */ 1538 VIRTCHNL_PROTO_HDR_IPV4_SRC = 1539 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4), 1540 VIRTCHNL_PROTO_HDR_IPV4_DST, 1541 VIRTCHNL_PROTO_HDR_IPV4_DSCP, 1542 VIRTCHNL_PROTO_HDR_IPV4_TTL, 1543 VIRTCHNL_PROTO_HDR_IPV4_PROT, 1544 VIRTCHNL_PROTO_HDR_IPV4_CHKSUM, 1545 /* IPV6 */ 1546 VIRTCHNL_PROTO_HDR_IPV6_SRC = 1547 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6), 1548 VIRTCHNL_PROTO_HDR_IPV6_DST, 1549 VIRTCHNL_PROTO_HDR_IPV6_TC, 1550 VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, 1551 VIRTCHNL_PROTO_HDR_IPV6_PROT, 1552 /* IPV6 Prefix */ 1553 VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC, 1554 VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST, 1555 VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC, 1556 VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST, 1557 VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC, 1558 VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST, 1559 VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC, 1560 VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST, 1561 VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC, 1562 VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST, 1563 VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC, 1564 VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST, 1565 /* TCP */ 1566 VIRTCHNL_PROTO_HDR_TCP_SRC_PORT = 1567 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP), 1568 VIRTCHNL_PROTO_HDR_TCP_DST_PORT, 1569 VIRTCHNL_PROTO_HDR_TCP_CHKSUM, 1570 /* UDP */ 1571 VIRTCHNL_PROTO_HDR_UDP_SRC_PORT = 1572 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP), 1573 VIRTCHNL_PROTO_HDR_UDP_DST_PORT, 1574 VIRTCHNL_PROTO_HDR_UDP_CHKSUM, 1575 /* SCTP */ 1576 VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT = 1577 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP), 1578 VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, 1579 VIRTCHNL_PROTO_HDR_SCTP_CHKSUM, 1580 /* GTPU_IP */ 1581 VIRTCHNL_PROTO_HDR_GTPU_IP_TEID = 1582 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP), 1583 /* GTPU_EH */ 1584 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU = 1585 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH), 1586 VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, 
1587 /* PPPOE */
1588 VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
1589 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
1590 /* L2TPV3 */
1591 VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
1592 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
1593 /* ESP */
1594 VIRTCHNL_PROTO_HDR_ESP_SPI =
1595 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
1596 /* AH */
1597 VIRTCHNL_PROTO_HDR_AH_SPI =
1598 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
1599 /* PFCP */
1600 VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
1601 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
1602 VIRTCHNL_PROTO_HDR_PFCP_SEID,
1603 /* GTPC */
1604 VIRTCHNL_PROTO_HDR_GTPC_TEID =
1605 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
1606 /* ECPRI */
1607 VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
1608 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
1609 VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
1610 /* IPv4 Dummy Fragment */
1611 VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
1612 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
1613 /* IPv6 Extension Fragment */
1614 VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
1615 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
1616 /* GTPU_DWN/UP */
1617 VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
1618 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
1619 VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
1620 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
1621 /* L2TPv2 */
1622 VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
1623 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
1624 VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
1625 };
1626
1627 struct virtchnl_proto_hdr {
1628 /* see enum virtchnl_proto_hdr_type */
1629 s32 type;
1630 u32 field_selector; /* a bit mask to select fields for this header type */
1631 u8 buffer[64];
1632 /**
1633 * Binary buffer in network order for the specific header type.
1634 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
1635 * header is expected to be copied into the buffer.
1636 */
1637 };
1638
1639 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
1640
1641 struct virtchnl_proto_hdr_w_msk {
1642 /* see enum virtchnl_proto_hdr_type */
1643 s32 type;
1644 u32 pad;
1645 /**
1646 * Binary buffer in network order for the specific header type.
1647 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
1648 * header is expected to be copied into the buffer.
1649 */
1650 u8 buffer_spec[64];
1651 /* binary buffer for the bit-mask applied to the specific header type */
1652 u8 buffer_mask[64];
1653 };
1654
1655 VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
1656
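/* Illustrative example (informational only, not part of the protocol
 * definition): a minimal sketch of how a VF driver might describe a single
 * Ethernet header and select its source and destination MAC fields with the
 * helper macros above. The function name is hypothetical and the caller is
 * assumed to provide a zero-initialized struct virtchnl_proto_hdr.
 */
static inline void
virtchnl_example_select_eth_fields(struct virtchnl_proto_hdr *hdr)
{
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);	/* hdr->type = VIRTCHNL_PROTO_HDR_ETH */
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, DST);
	/* a previously selected field can be dropped again, e.g.: */
	VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, ETH, SRC);
}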
1657 struct virtchnl_proto_hdrs {
1658 u8 tunnel_level;
1659 /**
1660 * Specifies where the protocol headers start from;
1661 * must be 0 when sending a raw packet request.
1662 * 0 - from the outer layer
1663 * 1 - from the first inner layer
1664 * 2 - from the second inner layer
1665 * ....
1666 */
1667 int count;
1668 /**
1669 * count must be <=
1670 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
1671 * count = 0 : select raw
1672 * 0 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS : select proto_hdr
1673 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS : select proto_hdr_w_msk,
1674 * where the last valid index is count - VIRTCHNL_MAX_NUM_PROTO_HDRS
1675 */
1676 union {
1677 struct virtchnl_proto_hdr
1678 proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
1679 struct virtchnl_proto_hdr_w_msk
1680 proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
1681 struct {
1682 u16 pkt_len;
1683 u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1684 u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1685 } raw;
1686 };
1687 };
1688
1689 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1690
1691 struct virtchnl_rss_cfg {
1692 struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
1693
1694 /* see enum virtchnl_rss_algorithm; rss algorithm type */
1695 s32 rss_algorithm;
1696 u8 reserved[128]; /* reserved for future use */
1697 };
1698
1699 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
1700
1701 /* action configuration for FDIR and FSUB */
1702 struct virtchnl_filter_action {
1703 /* see enum virtchnl_action */
1704 s32 type;
1705 union {
1706 /* used for queue and qgroup action */
1707 struct {
1708 u16 index;
1709 u8 region;
1710 } queue;
1711 /* used for count action */
1712 struct {
1713 /* share counter ID with other flow rules */
1714 u8 shared;
1715 u32 id; /* counter ID */
1716 } count;
1717 /* used for mark action */
1718 u32 mark_id;
1719 u8 reserve[32];
1720 } act_conf;
1721 };
1722
1723 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1724
1725 #define VIRTCHNL_MAX_NUM_ACTIONS 8
1726
1727 struct virtchnl_filter_action_set {
1728 /* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1729 int count;
1730 struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1731 };
1732
1733 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1734
1735 /* pattern and action for an FDIR rule */
1736 struct virtchnl_fdir_rule {
1737 struct virtchnl_proto_hdrs proto_hdrs;
1738 struct virtchnl_filter_action_set action_set;
1739 };
1740
1741 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1742
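/* Illustrative example (informational only, not part of the protocol
 * definition): a minimal sketch of how a VF driver might fill a struct
 * virtchnl_fdir_rule that matches IPv4/TCP packets and steers them to a
 * given Rx queue. The function name and its rx_queue parameter are
 * hypothetical, and *rule is assumed to be zero-initialized by the caller.
 * The concrete values to match (addresses, ports) would additionally be
 * copied into each proto_hdr's buffer in network order; that step is
 * omitted here.
 */
static inline void
virtchnl_example_fill_fdir_rule(struct virtchnl_fdir_rule *rule, u16 rx_queue)
{
	struct virtchnl_proto_hdrs *hdrs = &rule->proto_hdrs;
	struct virtchnl_filter_action *act = &rule->action_set.actions[0];

	hdrs->tunnel_level = 0;	/* match from the outer layer */
	hdrs->count = 2;	/* an IPv4 header followed by a TCP header */

	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[0], IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdrs->proto_hdr[0], IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdrs->proto_hdr[0], IPV4, DST);

	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[1], TCP);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdrs->proto_hdr[1], TCP, DST_PORT);

	/* a single action: direct matching packets to rx_queue */
	rule->action_set.count = 1;
	act->type = VIRTCHNL_ACTION_QUEUE;
	act->act_conf.queue.index = rx_queue;
}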
1743 /* Status returned to the VF after the VF requests FDIR commands
1744 * VIRTCHNL_FDIR_SUCCESS
1745 * The VF's FDIR-related request was completed successfully by the PF.
1746 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
1747 *
1748 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
1749 * OP_ADD_FDIR_FILTER request failed because no hardware resource is available.
1750 *
1751 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
1752 * OP_ADD_FDIR_FILTER request failed because the rule already exists.
1753 *
1754 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
1755 * OP_ADD_FDIR_FILTER request failed because it conflicts with an existing rule.
1756 *
1757 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
1758 * OP_DEL_FDIR_FILTER request failed because the rule does not exist.
1759 *
1760 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
1761 * OP_ADD_FDIR_FILTER request failed because parameter validation failed
1762 * or the hardware does not support the rule.
1763 *
1764 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
1765 * OP_ADD/DEL_FDIR_FILTER request failed because rule programming
1766 * timed out.
1767 *
1768 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
1769 * OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
1770 * for example, the VF queried the counter of a rule that has no counter action.
1771 */
1772 enum virtchnl_fdir_prgm_status {
1773 VIRTCHNL_FDIR_SUCCESS = 0,
1774 VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1775 VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1776 VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1777 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1778 VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1779 VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1780 VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1781 };
1782
1783 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1784 * VF sends this request to the PF by filling out vsi_id,
1785 * validate_only and rule_cfg. The PF will return flow_id
1786 * if the request completes successfully, and will return the status to the VF.
1787 */
1788 struct virtchnl_fdir_add {
1789 u16 vsi_id; /* INPUT */
1790 /*
1791 * 1 to validate an FDIR rule, 0 to create an FDIR rule.
1792 * Validate and create share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
1793 */
1794 u16 validate_only; /* INPUT */
1795 u32 flow_id; /* OUTPUT */
1796 struct virtchnl_fdir_rule rule_cfg; /* INPUT */
1797
1798 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1799 s32 status;
1800 };
1801
1802 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
1803
1804 /* VIRTCHNL_OP_DEL_FDIR_FILTER
1805 * VF sends this request to the PF by filling out vsi_id
1806 * and flow_id. The PF will return the status to the VF.
1807 */
1808 struct virtchnl_fdir_del {
1809 u16 vsi_id; /* INPUT */
1810 u16 pad;
1811 u32 flow_id; /* INPUT */
1812
1813 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1814 s32 status;
1815 };
1816
1817 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
1818
1819 /* Status returned to the VF after the VF requests FSUB commands
1820 * VIRTCHNL_FSUB_SUCCESS
1821 * The VF's flow-related request was completed successfully by the PF.
1822 * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
1823 *
1824 * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
1825 * OP_FLOW_SUBSCRIBE request failed because no hardware resource is available.
1826 *
1827 * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
1828 * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
1829 *
1830 * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
1831 * OP_FLOW_UNSUBSCRIBE request failed because the rule does not exist.
1832 *
1833 * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
1834 * OP_FLOW_SUBSCRIBE request failed because parameter validation failed
1835 * or the hardware does not support the rule.
1836 */
1837 enum virtchnl_fsub_prgm_status {
1838 VIRTCHNL_FSUB_SUCCESS = 0,
1839 VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
1840 VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
1841 VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
1842 VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
1843 };
1844
1845 /* VIRTCHNL_OP_FLOW_SUBSCRIBE
1846 * VF sends this request to the PF by filling out vsi_id,
1847 * validate_only, priority, proto_hdrs and actions.
1848 * The PF will return flow_id
1849 * if the request completes successfully, and will return the status to the VF.
1850 */
1851 struct virtchnl_flow_sub {
1852 u16 vsi_id; /* INPUT */
1853 u8 validate_only; /* INPUT */
1854 /* 0 is the highest priority; INPUT */
1855 u8 priority;
1856 u32 flow_id; /* OUTPUT */
1857 struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
1858 struct virtchnl_filter_action_set actions; /* INPUT */
1859 /* see enum virtchnl_fsub_prgm_status; OUTPUT */
1860 s32 status;
1861 };
1862
1863 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
1864
1865 /* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
1866 * VF sends this request to the PF by filling out vsi_id
1867 * and flow_id. The PF will return the status to the VF.
1868 */ 1869 struct virtchnl_flow_unsub { 1870 u16 vsi_id; /* INPUT */ 1871 u16 pad; 1872 u32 flow_id; /* INPUT */ 1873 /* see enum virtchnl_fsub_prgm_status; OUTPUT */ 1874 s32 status; 1875 }; 1876 1877 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub); 1878 1879 /* VIRTCHNL_OP_GET_QOS_CAPS 1880 * VF sends this message to get its QoS Caps, such as 1881 * TC number, Arbiter and Bandwidth. 1882 */ 1883 struct virtchnl_qos_cap_elem { 1884 u8 tc_num; 1885 u8 tc_prio; 1886 #define VIRTCHNL_ABITER_STRICT 0 1887 #define VIRTCHNL_ABITER_ETS 2 1888 u8 arbiter; 1889 #define VIRTCHNL_STRICT_WEIGHT 1 1890 u8 weight; 1891 enum virtchnl_bw_limit_type type; 1892 union { 1893 struct virtchnl_shaper_bw shaper; 1894 u8 pad2[32]; 1895 }; 1896 }; 1897 1898 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem); 1899 1900 struct virtchnl_qos_cap_list { 1901 u16 vsi_id; 1902 u16 num_elem; 1903 struct virtchnl_qos_cap_elem cap[1]; 1904 }; 1905 1906 VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list); 1907 1908 /* VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP 1909 * VF sends message virtchnl_queue_tc_mapping to set queue to tc 1910 * mapping for all the Tx and Rx queues with a specified VSI, and 1911 * would get response about bitmap of valid user priorities 1912 * associated with queues. 1913 */ 1914 struct virtchnl_queue_tc_mapping { 1915 u16 vsi_id; 1916 u16 num_tc; 1917 u16 num_queue_pairs; 1918 u8 pad[2]; 1919 union { 1920 struct { 1921 u16 start_queue_id; 1922 u16 queue_count; 1923 } req; 1924 struct { 1925 #define VIRTCHNL_USER_PRIO_TYPE_UP 0 1926 #define VIRTCHNL_USER_PRIO_TYPE_DSCP 1 1927 u16 prio_type; 1928 u16 valid_prio_bitmap; 1929 } resp; 1930 } tc[1]; 1931 }; 1932 1933 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping); 1934 1935 /* VIRTCHNL_OP_CONFIG_QUEUE_BW */ 1936 struct virtchnl_queue_bw { 1937 u16 queue_id; 1938 u8 tc; 1939 u8 pad; 1940 struct virtchnl_shaper_bw shaper; 1941 }; 1942 1943 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw); 1944 1945 struct virtchnl_queues_bw_cfg { 1946 u16 vsi_id; 1947 u16 num_queues; 1948 struct virtchnl_queue_bw cfg[1]; 1949 }; 1950 1951 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg); 1952 1953 /* queue types */ 1954 enum virtchnl_queue_type { 1955 VIRTCHNL_QUEUE_TYPE_TX = 0, 1956 VIRTCHNL_QUEUE_TYPE_RX = 1, 1957 }; 1958 1959 /* structure to specify a chunk of contiguous queues */ 1960 struct virtchnl_queue_chunk { 1961 /* see enum virtchnl_queue_type */ 1962 s32 type; 1963 u16 start_queue_id; 1964 u16 num_queues; 1965 }; 1966 1967 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk); 1968 1969 /* structure to specify several chunks of contiguous queues */ 1970 struct virtchnl_queue_chunks { 1971 u16 num_chunks; 1972 u16 rsvd; 1973 struct virtchnl_queue_chunk chunks[1]; 1974 }; 1975 1976 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks); 1977 1978 /* VIRTCHNL_OP_ENABLE_QUEUES_V2 1979 * VIRTCHNL_OP_DISABLE_QUEUES_V2 1980 * 1981 * These opcodes can be used if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in 1982 * VIRTCHNL_OP_GET_VF_RESOURCES 1983 * 1984 * VF sends virtchnl_ena_dis_queues struct to specify the queues to be 1985 * enabled/disabled in chunks. Also applicable to single queue RX or 1986 * TX. PF performs requested action and returns status. 
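 * For example, enabling Tx queues 0-3 and Rx queues 0-3 on a vport can be
 * expressed with two entries in chunks.chunks[]:
 * { .type = VIRTCHNL_QUEUE_TYPE_TX, .start_queue_id = 0, .num_queues = 4 } and
 * { .type = VIRTCHNL_QUEUE_TYPE_RX, .start_queue_id = 0, .num_queues = 4 }.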
1987 */
1988 struct virtchnl_del_ena_dis_queues {
1989 u16 vport_id;
1990 u16 pad;
1991 struct virtchnl_queue_chunks chunks;
1992 };
1993
1994 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_del_ena_dis_queues);
1995
1996 /* Virtchannel interrupt throttling rate index */
1997 enum virtchnl_itr_idx {
1998 VIRTCHNL_ITR_IDX_0 = 0,
1999 VIRTCHNL_ITR_IDX_1 = 1,
2000 VIRTCHNL_ITR_IDX_NO_ITR = 3,
2001 };
2002
2003 /* Queue to vector mapping */
2004 struct virtchnl_queue_vector {
2005 u16 queue_id;
2006 u16 vector_id;
2007 u8 pad[4];
2008
2009 /* see enum virtchnl_itr_idx */
2010 s32 itr_idx;
2011
2012 /* see enum virtchnl_queue_type */
2013 s32 queue_type;
2014 };
2015
2016 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
2017
2018 /* VIRTCHNL_OP_MAP_QUEUE_VECTOR
2019 *
2020 * This opcode can be used only if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated
2021 * in VIRTCHNL_OP_GET_VF_RESOURCES.
2022 *
2023 * VF sends this message to map queues to vectors and ITR index registers.
2024 * External data buffer contains a virtchnl_queue_vector_maps structure
2025 * that contains num_qv_maps of virtchnl_queue_vector structures.
2026 * PF maps the requested queue vector maps after validating the queue and vector
2027 * ids and returns a status code.
2028 */
2029 struct virtchnl_queue_vector_maps {
2030 u16 vport_id;
2031 u16 num_qv_maps;
2032 u8 pad[4];
2033 struct virtchnl_queue_vector qv_maps[1];
2034 };
2035
2036 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
2037
2038 struct virtchnl_quanta_cfg {
2039 u16 quanta_size;
2040 struct virtchnl_queue_chunk queue_select;
2041 };
2042
2043 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
2044
2045 /*
2046 * VIRTCHNL_OP_HQOS_READ_TREE
2047 * VIRTCHNL_OP_HQOS_ELEM_ADD
2048 * VIRTCHNL_OP_HQOS_ELEM_DEL
2049 * VIRTCHNL_OP_HQOS_ELEM_BW_SET
2050 * List of TC and queue HW QoS values
2051 */
2052 struct virtchnl_hqos_cfg {
2053 #define VIRTCHNL_HQOS_ELEM_TYPE_NODE 0
2054 #define VIRTCHNL_HQOS_ELEM_TYPE_LEAF 1
2055 u8 node_type;
2056 u8 pad[7];
2057 u32 teid;
2058 u32 parent_teid;
2059 u64 tx_max;
2060 u64 tx_share;
2061 u32 tx_priority;
2062 u32 tx_weight;
2063 };
2064 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_hqos_cfg);
2065
2066 struct virtchnl_hqos_cfg_list {
2067 u16 num_elem;
2068 u8 pad[6];
2069 struct virtchnl_hqos_cfg cfg[1];
2070 };
2071 VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_hqos_cfg_list);
2072
2073 /* Since VF messages are limited to a u16 size, precalculate the maximum
2074 * possible number of nested elements in virtchnl structures that the virtual
2075 * channel can possibly handle in a single message.
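 * For example, since (u16)(~0) is 65535, VIRTCHNL_OP_ADD_DEL_VLAN_MAX below
 * works out to (65535 - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16),
 * i.e. roughly the largest number of u16 VLAN IDs that can still accompany
 * the list header in one message.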
2076 */ 2077 enum virtchnl_vector_limits { 2078 VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX = 2079 ((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) / 2080 sizeof(struct virtchnl_queue_pair_info), 2081 2082 VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX = 2083 ((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) / 2084 sizeof(struct virtchnl_vector_map), 2085 2086 VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX = 2087 ((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) / 2088 sizeof(struct virtchnl_ether_addr), 2089 2090 VIRTCHNL_OP_ADD_DEL_VLAN_MAX = 2091 ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) / 2092 sizeof(u16), 2093 2094 VIRTCHNL_OP_ENABLE_CHANNELS_MAX = 2095 ((u16)(~0) - sizeof(struct virtchnl_tc_info)) / 2096 sizeof(struct virtchnl_channel_info), 2097 2098 VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX = 2099 ((u16)(~0) - sizeof(struct virtchnl_del_ena_dis_queues)) / 2100 sizeof(struct virtchnl_queue_chunk), 2101 2102 VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX = 2103 ((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) / 2104 sizeof(struct virtchnl_queue_vector), 2105 2106 VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX = 2107 ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) / 2108 sizeof(struct virtchnl_vlan_filter), 2109 }; 2110 2111 /** 2112 * virtchnl_vc_validate_vf_msg 2113 * @ver: Virtchnl version info 2114 * @v_opcode: Opcode for the message 2115 * @msg: pointer to the msg buffer 2116 * @msglen: msg length 2117 * 2118 * validate msg format against struct for each opcode 2119 */ 2120 static inline int 2121 virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, 2122 u8 *msg, u16 msglen) 2123 { 2124 bool err_msg_format = false; 2125 u32 valid_len = 0; 2126 2127 /* Validate message length. */ 2128 switch (v_opcode) { 2129 case VIRTCHNL_OP_VERSION: 2130 valid_len = sizeof(struct virtchnl_version_info); 2131 break; 2132 case VIRTCHNL_OP_RESET_VF: 2133 break; 2134 case VIRTCHNL_OP_GET_VF_RESOURCES: 2135 if (VF_IS_V11(ver)) 2136 valid_len = sizeof(u32); 2137 break; 2138 case VIRTCHNL_OP_CONFIG_TX_QUEUE: 2139 valid_len = sizeof(struct virtchnl_txq_info); 2140 break; 2141 case VIRTCHNL_OP_CONFIG_RX_QUEUE: 2142 valid_len = sizeof(struct virtchnl_rxq_info); 2143 break; 2144 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 2145 valid_len = sizeof(struct virtchnl_vsi_queue_config_info); 2146 if (msglen >= valid_len) { 2147 struct virtchnl_vsi_queue_config_info *vqc = 2148 (struct virtchnl_vsi_queue_config_info *)msg; 2149 2150 if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs > 2151 VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) { 2152 err_msg_format = true; 2153 break; 2154 } 2155 2156 valid_len += (vqc->num_queue_pairs * 2157 sizeof(struct 2158 virtchnl_queue_pair_info)); 2159 } 2160 break; 2161 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 2162 valid_len = sizeof(struct virtchnl_irq_map_info); 2163 if (msglen >= valid_len) { 2164 struct virtchnl_irq_map_info *vimi = 2165 (struct virtchnl_irq_map_info *)msg; 2166 2167 if (vimi->num_vectors == 0 || vimi->num_vectors > 2168 VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) { 2169 err_msg_format = true; 2170 break; 2171 } 2172 2173 valid_len += (vimi->num_vectors * 2174 sizeof(struct virtchnl_vector_map)); 2175 } 2176 break; 2177 case VIRTCHNL_OP_ENABLE_QUEUES: 2178 case VIRTCHNL_OP_DISABLE_QUEUES: 2179 valid_len = sizeof(struct virtchnl_queue_select); 2180 break; 2181 case VIRTCHNL_OP_GET_MAX_RSS_QREGION: 2182 break; 2183 case VIRTCHNL_OP_ADD_ETH_ADDR: 2184 case VIRTCHNL_OP_DEL_ETH_ADDR: 2185 valid_len = sizeof(struct virtchnl_ether_addr_list); 2186 if (msglen >= valid_len) { 2187 struct 
virtchnl_ether_addr_list *veal = 2188 (struct virtchnl_ether_addr_list *)msg; 2189 2190 if (veal->num_elements == 0 || veal->num_elements > 2191 VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) { 2192 err_msg_format = true; 2193 break; 2194 } 2195 2196 valid_len += veal->num_elements * 2197 sizeof(struct virtchnl_ether_addr); 2198 } 2199 break; 2200 case VIRTCHNL_OP_ADD_VLAN: 2201 case VIRTCHNL_OP_DEL_VLAN: 2202 valid_len = sizeof(struct virtchnl_vlan_filter_list); 2203 if (msglen >= valid_len) { 2204 struct virtchnl_vlan_filter_list *vfl = 2205 (struct virtchnl_vlan_filter_list *)msg; 2206 2207 if (vfl->num_elements == 0 || vfl->num_elements > 2208 VIRTCHNL_OP_ADD_DEL_VLAN_MAX) { 2209 err_msg_format = true; 2210 break; 2211 } 2212 2213 valid_len += vfl->num_elements * sizeof(u16); 2214 } 2215 break; 2216 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 2217 valid_len = sizeof(struct virtchnl_promisc_info); 2218 break; 2219 case VIRTCHNL_OP_GET_STATS: 2220 valid_len = sizeof(struct virtchnl_queue_select); 2221 break; 2222 case VIRTCHNL_OP_CONFIG_RSS_KEY: 2223 valid_len = sizeof(struct virtchnl_rss_key); 2224 if (msglen >= valid_len) { 2225 struct virtchnl_rss_key *vrk = 2226 (struct virtchnl_rss_key *)msg; 2227 2228 if (vrk->key_len == 0) { 2229 /* zero length is allowed as input */ 2230 break; 2231 } 2232 2233 valid_len += vrk->key_len - 1; 2234 } 2235 break; 2236 case VIRTCHNL_OP_CONFIG_RSS_LUT: 2237 valid_len = sizeof(struct virtchnl_rss_lut); 2238 if (msglen >= valid_len) { 2239 struct virtchnl_rss_lut *vrl = 2240 (struct virtchnl_rss_lut *)msg; 2241 2242 if (vrl->lut_entries == 0) { 2243 /* zero entries is allowed as input */ 2244 break; 2245 } 2246 2247 valid_len += vrl->lut_entries - 1; 2248 } 2249 break; 2250 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 2251 break; 2252 case VIRTCHNL_OP_SET_RSS_HENA: 2253 valid_len = sizeof(struct virtchnl_rss_hena); 2254 break; 2255 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 2256 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 2257 break; 2258 case VIRTCHNL_OP_REQUEST_QUEUES: 2259 valid_len = sizeof(struct virtchnl_vf_res_request); 2260 break; 2261 case VIRTCHNL_OP_ENABLE_CHANNELS: 2262 valid_len = sizeof(struct virtchnl_tc_info); 2263 if (msglen >= valid_len) { 2264 struct virtchnl_tc_info *vti = 2265 (struct virtchnl_tc_info *)msg; 2266 2267 if (vti->num_tc == 0 || vti->num_tc > 2268 VIRTCHNL_OP_ENABLE_CHANNELS_MAX) { 2269 err_msg_format = true; 2270 break; 2271 } 2272 2273 valid_len += (vti->num_tc - 1) * 2274 sizeof(struct virtchnl_channel_info); 2275 } 2276 break; 2277 case VIRTCHNL_OP_DISABLE_CHANNELS: 2278 break; 2279 case VIRTCHNL_OP_ADD_CLOUD_FILTER: 2280 case VIRTCHNL_OP_DEL_CLOUD_FILTER: 2281 valid_len = sizeof(struct virtchnl_filter); 2282 break; 2283 case VIRTCHNL_OP_ADD_RSS_CFG: 2284 case VIRTCHNL_OP_DEL_RSS_CFG: 2285 valid_len = sizeof(struct virtchnl_rss_cfg); 2286 break; 2287 case VIRTCHNL_OP_ADD_FDIR_FILTER: 2288 valid_len = sizeof(struct virtchnl_fdir_add); 2289 break; 2290 case VIRTCHNL_OP_DEL_FDIR_FILTER: 2291 valid_len = sizeof(struct virtchnl_fdir_del); 2292 break; 2293 case VIRTCHNL_OP_FLOW_SUBSCRIBE: 2294 valid_len = sizeof(struct virtchnl_flow_sub); 2295 break; 2296 case VIRTCHNL_OP_FLOW_UNSUBSCRIBE: 2297 valid_len = sizeof(struct virtchnl_flow_unsub); 2298 break; 2299 case VIRTCHNL_OP_GET_QOS_CAPS: 2300 break; 2301 case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP: 2302 valid_len = sizeof(struct virtchnl_queue_tc_mapping); 2303 if (msglen >= valid_len) { 2304 struct virtchnl_queue_tc_mapping *q_tc = 2305 (struct virtchnl_queue_tc_mapping *)msg; 2306 if (q_tc->num_tc == 0) { 
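/* a queue-to-TC map must describe at least one TC */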
2307 err_msg_format = true; 2308 break; 2309 } 2310 valid_len += (q_tc->num_tc - 1) * 2311 sizeof(q_tc->tc[0]); 2312 } 2313 break; 2314 case VIRTCHNL_OP_CONFIG_QUEUE_BW: 2315 valid_len = sizeof(struct virtchnl_queues_bw_cfg); 2316 if (msglen >= valid_len) { 2317 struct virtchnl_queues_bw_cfg *q_bw = 2318 (struct virtchnl_queues_bw_cfg *)msg; 2319 if (q_bw->num_queues == 0) { 2320 err_msg_format = true; 2321 break; 2322 } 2323 valid_len += (q_bw->num_queues - 1) * 2324 sizeof(q_bw->cfg[0]); 2325 } 2326 break; 2327 case VIRTCHNL_OP_CONFIG_QUANTA: 2328 valid_len = sizeof(struct virtchnl_quanta_cfg); 2329 if (msglen >= valid_len) { 2330 struct virtchnl_quanta_cfg *q_quanta = 2331 (struct virtchnl_quanta_cfg *)msg; 2332 if (q_quanta->quanta_size == 0 || 2333 q_quanta->queue_select.num_queues == 0) { 2334 err_msg_format = true; 2335 break; 2336 } 2337 } 2338 break; 2339 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: 2340 break; 2341 case VIRTCHNL_OP_ADD_VLAN_V2: 2342 case VIRTCHNL_OP_DEL_VLAN_V2: 2343 valid_len = sizeof(struct virtchnl_vlan_filter_list_v2); 2344 if (msglen >= valid_len) { 2345 struct virtchnl_vlan_filter_list_v2 *vfl = 2346 (struct virtchnl_vlan_filter_list_v2 *)msg; 2347 2348 if (vfl->num_elements == 0 || vfl->num_elements > 2349 VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) { 2350 err_msg_format = true; 2351 break; 2352 } 2353 2354 valid_len += (vfl->num_elements - 1) * 2355 sizeof(struct virtchnl_vlan_filter); 2356 } 2357 break; 2358 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 2359 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 2360 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 2361 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 2362 case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2: 2363 case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2: 2364 valid_len = sizeof(struct virtchnl_vlan_setting); 2365 break; 2366 case VIRTCHNL_OP_ENABLE_QUEUES_V2: 2367 case VIRTCHNL_OP_DISABLE_QUEUES_V2: 2368 valid_len = sizeof(struct virtchnl_del_ena_dis_queues); 2369 if (msglen >= valid_len) { 2370 struct virtchnl_del_ena_dis_queues *qs = 2371 (struct virtchnl_del_ena_dis_queues *)msg; 2372 if (qs->chunks.num_chunks == 0 || 2373 qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) { 2374 err_msg_format = true; 2375 break; 2376 } 2377 valid_len += (qs->chunks.num_chunks - 1) * 2378 sizeof(struct virtchnl_queue_chunk); 2379 } 2380 break; 2381 case VIRTCHNL_OP_MAP_QUEUE_VECTOR: 2382 valid_len = sizeof(struct virtchnl_queue_vector_maps); 2383 if (msglen >= valid_len) { 2384 struct virtchnl_queue_vector_maps *v_qp = 2385 (struct virtchnl_queue_vector_maps *)msg; 2386 if (v_qp->num_qv_maps == 0 || 2387 v_qp->num_qv_maps > VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) { 2388 err_msg_format = true; 2389 break; 2390 } 2391 valid_len += (v_qp->num_qv_maps - 1) * 2392 sizeof(struct virtchnl_queue_vector); 2393 } 2394 break; 2395 /* These are always errors coming from the VF. */ 2396 case VIRTCHNL_OP_EVENT: 2397 case VIRTCHNL_OP_UNKNOWN: 2398 default: 2399 return VIRTCHNL_STATUS_ERR_PARAM; 2400 } 2401 /* few more checks */ 2402 if (err_msg_format || valid_len != msglen) 2403 return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; 2404 2405 return 0; 2406 } 2407 #endif /* _VIRTCHNL_H_ */ 2408
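/*
 * Illustrative usage note (informational only): a PF-side message handler
 * would typically call virtchnl_vc_validate_vf_msg() before acting on a
 * message received from a VF. In the sketch below, only the validator is
 * part of this header; the vf context and respond_to_vf() helper are
 * hypothetical:
 *
 *	int err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode,
 *					      msg, msglen);
 *	if (err)
 *		return respond_to_vf(vf, v_opcode, err, NULL, 0);
 *	// safe to interpret msg as the structure matching v_opcode
 */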