xref: /freebsd/sys/dev/ice/virtchnl.h (revision 7ef62cebc2f965b0f640263e179276928885e33d)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2023, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*$FreeBSD$*/
32 
33 #ifndef _VIRTCHNL_H_
34 #define _VIRTCHNL_H_
35 
36 /* Description:
37  * This header file describes the Virtual Function (VF) - Physical Function
38  * (PF) communication protocol used by the drivers for all devices starting
39  * from our 40G product line.
40  *
41  * Admin queue buffer usage:
42  * desc->opcode is always aqc_opc_send_msg_to_pf
43  * flags, retval, datalen, and data addr are all used normally.
44  * The Firmware copies the cookie fields when sending messages between the
45  * PF and VF, but uses all other fields internally. Due to this limitation,
46  * we must send all messages as "indirect", i.e. using an external buffer.
47  *
48  * All the VSI indexes are relative to the VF. Each VF can have a maximum of
49  * three VSIs. All the queue indexes are relative to the VSI.  Each VF can
50  * have a maximum of sixteen queues for all of its VSIs.
51  *
52  * The PF is required to return a status code in v_retval for all messages
53  * except RESET_VF, which does not require any response. The returned value
54  * is of virtchnl_status_code type, defined here.
55  *
56  * In general, VF driver initialization should roughly follow the order of
57  * these opcodes. The VF driver must first validate the API version of the
58  * PF driver, then request a reset, then get resources, then configure
59  * queues and interrupts. After these operations are complete, the VF
60  * driver may start its queues, optionally add MAC and VLAN filters, and
61  * process traffic.
62  */
63 
64 /* START GENERIC DEFINES
65  * Need to ensure the following enums and defines hold the same meaning and
66  * value in current and future projects
67  */
68 
69 #define VIRTCHNL_ETH_LENGTH_OF_ADDRESS	6
70 
71 /* These macros are used to generate compilation errors if a structure/union
72  * is not exactly the correct length. It gives a divide by zero error if the
73  * structure/union is not of the correct size, otherwise it creates an enum
74  * that is never used.
75  */
76 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
77 	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
78 #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
79 	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
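
/* Illustrative usage sketch (the example struct below is hypothetical and not
 * part of this interface): the macro compiles cleanly when the size matches
 * and fails with a divide-by-zero error when it does not.
 *
 *	struct virtchnl_example { u32 a; u32 b; };
 *	VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_example);   (compiles)
 *	VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_example);  (divide-by-zero error)
 */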
80 
81 /* Error Codes
82  * Note that many older versions of various iAVF drivers convert the reported
83  * status code directly into an iavf_status enumeration. For this reason, it
84  * is important that the values of these enumerations line up.
85  */
86 enum virtchnl_status_code {
87 	VIRTCHNL_STATUS_SUCCESS				= 0,
88 	VIRTCHNL_STATUS_ERR_PARAM			= -5,
89 	VIRTCHNL_STATUS_ERR_NO_MEMORY			= -18,
90 	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH		= -38,
91 	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR		= -39,
92 	VIRTCHNL_STATUS_ERR_INVALID_VF_ID		= -40,
93 	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR		= -53,
94 	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED		= -64,
95 };
96 
97 /* Backward compatibility */
98 #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
99 #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
100 
101 #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT		0x0
102 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT		0x1
103 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT	0x2
104 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT		0x3
105 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT		0x4
106 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT		0x5
107 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT		0x6
108 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT		0x7
109 
110 enum virtchnl_link_speed {
111 	VIRTCHNL_LINK_SPEED_UNKNOWN	= 0,
112 	VIRTCHNL_LINK_SPEED_100MB	= BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
113 	VIRTCHNL_LINK_SPEED_1GB		= BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
114 	VIRTCHNL_LINK_SPEED_10GB	= BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
115 	VIRTCHNL_LINK_SPEED_40GB	= BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
116 	VIRTCHNL_LINK_SPEED_20GB	= BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
117 	VIRTCHNL_LINK_SPEED_25GB	= BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
118 	VIRTCHNL_LINK_SPEED_2_5GB	= BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
119 	VIRTCHNL_LINK_SPEED_5GB		= BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
120 };
121 
122 /* for hsplit_0 field of Rx HMC context */
123 /* deprecated with AVF 1.0 */
124 enum virtchnl_rx_hsplit {
125 	VIRTCHNL_RX_HSPLIT_NO_SPLIT      = 0,
126 	VIRTCHNL_RX_HSPLIT_SPLIT_L2      = 1,
127 	VIRTCHNL_RX_HSPLIT_SPLIT_IP      = 2,
128 	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
129 	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,
130 };
131 
132 enum virtchnl_bw_limit_type {
133 	VIRTCHNL_BW_SHAPER = 0,
134 };
135 /* END GENERIC DEFINES */
136 
137 /* Opcodes for VF-PF communication. These are placed in the v_opcode field
138  * of the virtchnl_msg structure.
139  */
140 enum virtchnl_ops {
141 /* The PF sends status change events to VFs using
142  * the VIRTCHNL_OP_EVENT opcode.
143  * VFs send requests to the PF using the other ops.
144  * Use of "advanced opcode" features must be negotiated as part of the
145  * capabilities exchange and is not considered part of the base mode
146  * feature set.
147  */
148 	VIRTCHNL_OP_UNKNOWN = 0,
149 	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
150 	VIRTCHNL_OP_RESET_VF = 2,
151 	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
152 	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
153 	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
154 	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
155 	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
156 	VIRTCHNL_OP_ENABLE_QUEUES = 8,
157 	VIRTCHNL_OP_DISABLE_QUEUES = 9,
158 	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
159 	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
160 	VIRTCHNL_OP_ADD_VLAN = 12,
161 	VIRTCHNL_OP_DEL_VLAN = 13,
162 	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
163 	VIRTCHNL_OP_GET_STATS = 15,
164 	VIRTCHNL_OP_RSVD = 16,
165 	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
166 	/* opcode 19 is reserved */
167 	/* opcodes 20, 21, and 22 are reserved */
168 	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
169 	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
170 	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
171 	VIRTCHNL_OP_SET_RSS_HENA = 26,
172 	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
173 	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
174 	VIRTCHNL_OP_REQUEST_QUEUES = 29,
175 	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
176 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
177 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
178 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
179 	/* opcode 34 is reserved */
180 	/* opcodes 38, 39, 40, 41, 42 and 43 are reserved */
181 	/* opcode 44 is reserved */
182 	VIRTCHNL_OP_ADD_RSS_CFG = 45,
183 	VIRTCHNL_OP_DEL_RSS_CFG = 46,
184 	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
185 	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
186 	VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
187 	VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
188 	VIRTCHNL_OP_ADD_VLAN_V2 = 52,
189 	VIRTCHNL_OP_DEL_VLAN_V2 = 53,
190 	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
191 	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
192 	VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
193 	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
194 	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
195 	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
196 	/* opcodes 60 through 65 are reserved */
197 	VIRTCHNL_OP_GET_QOS_CAPS = 66,
198 	VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
199 	/* opcodes 68 through 70 are reserved */
200 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
201 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
202 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
203 	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
204 	VIRTCHNL_OP_CONFIG_QUANTA = 113,
205 	VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
206 	VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
207 	/* opcodes 116 through 128 are reserved */
208 	VIRTCHNL_OP_MAX,
209 };
210 
211 static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
212 {
213 	switch (v_opcode) {
214 	case VIRTCHNL_OP_UNKNOWN:
215 		return "VIRTCHNL_OP_UNKNOWN";
216 	case VIRTCHNL_OP_VERSION:
217 		return "VIRTCHNL_OP_VERSION";
218 	case VIRTCHNL_OP_RESET_VF:
219 		return "VIRTCHNL_OP_RESET_VF";
220 	case VIRTCHNL_OP_GET_VF_RESOURCES:
221 		return "VIRTCHNL_OP_GET_VF_RESOURCES";
222 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
223 		return "VIRTCHNL_OP_CONFIG_TX_QUEUE";
224 	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
225 		return "VIRTCHNL_OP_CONFIG_RX_QUEUE";
226 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
227 		return "VIRTCHNL_OP_CONFIG_VSI_QUEUES";
228 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
229 		return "VIRTCHNL_OP_CONFIG_IRQ_MAP";
230 	case VIRTCHNL_OP_ENABLE_QUEUES:
231 		return "VIRTCHNL_OP_ENABLE_QUEUES";
232 	case VIRTCHNL_OP_DISABLE_QUEUES:
233 		return "VIRTCHNL_OP_DISABLE_QUEUES";
234 	case VIRTCHNL_OP_ADD_ETH_ADDR:
235 		return "VIRTCHNL_OP_ADD_ETH_ADDR";
236 	case VIRTCHNL_OP_DEL_ETH_ADDR:
237 		return "VIRTCHNL_OP_DEL_ETH_ADDR";
238 	case VIRTCHNL_OP_ADD_VLAN:
239 		return "VIRTCHNL_OP_ADD_VLAN";
240 	case VIRTCHNL_OP_DEL_VLAN:
241 		return "VIRTCHNL_OP_DEL_VLAN";
242 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
243 		return "VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE";
244 	case VIRTCHNL_OP_GET_STATS:
245 		return "VIRTCHNL_OP_GET_STATS";
246 	case VIRTCHNL_OP_RSVD:
247 		return "VIRTCHNL_OP_RSVD";
248 	case VIRTCHNL_OP_EVENT:
249 		return "VIRTCHNL_OP_EVENT";
250 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
251 		return "VIRTCHNL_OP_CONFIG_RSS_KEY";
252 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
253 		return "VIRTCHNL_OP_CONFIG_RSS_LUT";
254 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
255 		return "VIRTCHNL_OP_GET_RSS_HENA_CAPS";
256 	case VIRTCHNL_OP_SET_RSS_HENA:
257 		return "VIRTCHNL_OP_SET_RSS_HENA";
258 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
259 		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING";
260 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
261 		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING";
262 	case VIRTCHNL_OP_REQUEST_QUEUES:
263 		return "VIRTCHNL_OP_REQUEST_QUEUES";
264 	case VIRTCHNL_OP_ENABLE_CHANNELS:
265 		return "VIRTCHNL_OP_ENABLE_CHANNELS";
266 	case VIRTCHNL_OP_DISABLE_CHANNELS:
267 		return "VIRTCHNL_OP_DISABLE_CHANNELS";
268 	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
269 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
270 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
271 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
272 	case VIRTCHNL_OP_ADD_RSS_CFG:
273 		return "VIRTCHNL_OP_ADD_RSS_CFG";
274 	case VIRTCHNL_OP_DEL_RSS_CFG:
275 		return "VIRTCHNL_OP_DEL_RSS_CFG";
276 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
277 		return "VIRTCHNL_OP_ADD_FDIR_FILTER";
278 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
279 		return "VIRTCHNL_OP_DEL_FDIR_FILTER";
280 	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
281 		return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
282 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
283 		return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS";
284 	case VIRTCHNL_OP_ADD_VLAN_V2:
285 		return "VIRTCHNL_OP_ADD_VLAN_V2";
286 	case VIRTCHNL_OP_DEL_VLAN_V2:
287 		return "VIRTCHNL_OP_DEL_VLAN_V2";
288 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
289 		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2";
290 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
291 		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2";
292 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
293 		return "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2";
294 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
295 		return "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2";
296 	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
297 		return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
298 	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
299 		return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
300 	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
301 		return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
302 	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
303 		return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
304 	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
305 		return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
306 	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
307 		return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
308 	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
309 		return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
310 	case VIRTCHNL_OP_MAX:
311 		return "VIRTCHNL_OP_MAX";
312 	default:
313 		return "Unsupported (update virtchnl.h)";
314 	}
315 }
316 
317 static inline const char *virtchnl_stat_str(enum virtchnl_status_code v_status)
318 {
319 	switch (v_status) {
320 	case VIRTCHNL_STATUS_SUCCESS:
321 		return "VIRTCHNL_STATUS_SUCCESS";
322 	case VIRTCHNL_STATUS_ERR_PARAM:
323 		return "VIRTCHNL_STATUS_ERR_PARAM";
324 	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
325 		return "VIRTCHNL_STATUS_ERR_NO_MEMORY";
326 	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
327 		return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
328 	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
329 		return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
330 	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
331 		return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
332 	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
333 		return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR";
334 	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
335 		return "VIRTCHNL_STATUS_ERR_NOT_SUPPORTED";
336 	default:
337 		return "Unknown status code (update virtchnl.h)";
338 	}
339 }
340 
341 /* Virtual channel message descriptor. This overlays the admin queue
342  * descriptor. All other data is passed in external buffers.
343  */
344 
345 struct virtchnl_msg {
346 	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
347 
348 	/* avoid confusion with desc->opcode */
349 	enum virtchnl_ops v_opcode;
350 
351 	/* ditto for desc->retval */
352 	enum virtchnl_status_code v_retval;
353 	u32 vfid;			 /* used by PF when sending to VF */
354 };
355 
356 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
357 
358 /* Message descriptions and data structures. */
359 
360 /* VIRTCHNL_OP_VERSION
361  * VF posts its version number to the PF. PF responds with its version number
362  * in the same format, along with a return code.
363  * Reply from PF has its major/minor versions also in param0 and param1.
364  * If there is a major version mismatch, then the VF cannot operate.
365  * If there is a minor version mismatch, then the VF can operate but should
366  * add a warning to the system log.
367  *
368  * This enum element MUST always be specified as == 1, regardless of other
369  * changes in the API. The PF must always respond to this message without
370  * error regardless of version mismatch.
371  */
372 #define VIRTCHNL_VERSION_MAJOR		1
373 #define VIRTCHNL_VERSION_MINOR		1
374 #define VIRTCHNL_VERSION_MAJOR_2	2
375 #define VIRTCHNL_VERSION_MINOR_0	0
376 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
377 
378 struct virtchnl_version_info {
379 	u32 major;
380 	u32 minor;
381 };
382 
383 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
384 
385 #define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))
386 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
387 #define VF_IS_V20(_ver) (((_ver)->major == 2) && ((_ver)->minor == 0))
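
/* Illustrative negotiation sketch (hypothetical VF-side check, not part of
 * this interface): after receiving the PF reply to VIRTCHNL_OP_VERSION, a VF
 * driver might validate it roughly as follows.
 *
 *	struct virtchnl_version_info pf_ver;	(copied from the PF reply)
 *
 *	if (pf_ver.major != VIRTCHNL_VERSION_MAJOR)
 *		return -EINVAL;			(major mismatch: cannot operate)
 *	if (pf_ver.minor != VIRTCHNL_VERSION_MINOR)
 *		log_warning("virtchnl minor version mismatch");
 */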
388 
389 /* VIRTCHNL_OP_RESET_VF
390  * VF sends this request to PF with no parameters
391  * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
392  * until reset completion is indicated. The admin queue must be reinitialized
393  * after this operation.
394  *
395  * When reset is complete, PF must ensure that all queues in all VSIs associated
396  * with the VF are stopped, all queue configurations in the HMC are set to 0,
397  * and all MAC and VLAN filters (except the default MAC address) on all VSIs
398  * are cleared.
399  */
400 
401 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
402  * vsi_type should always be 6 for backward compatibility. Add other fields
403  * as needed.
404  */
405 enum virtchnl_vsi_type {
406 	VIRTCHNL_VSI_TYPE_INVALID = 0,
407 	VIRTCHNL_VSI_SRIOV = 6,
408 };
409 
410 /* VIRTCHNL_OP_GET_VF_RESOURCES
411  * Version 1.0 VF sends this request to PF with no parameters
412  * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
413  * PF responds with an indirect message containing
414  * virtchnl_vf_resource and one or more
415  * virtchnl_vsi_resource structures.
416  */
417 
418 struct virtchnl_vsi_resource {
419 	u16 vsi_id;
420 	u16 num_queue_pairs;
421 
422 	/* see enum virtchnl_vsi_type */
423 	s32 vsi_type;
424 	u16 qset_handle;
425 	u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
426 };
427 
428 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
429 
430 /* VF capability flags
431  * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
432  * TX/RX Checksum offloading and TSO for non-tunnelled packets.
433  */
434 #define VIRTCHNL_VF_OFFLOAD_L2			BIT(0)
435 #define VIRTCHNL_VF_OFFLOAD_IWARP		BIT(1)
436 #define VIRTCHNL_VF_CAP_RDMA			VIRTCHNL_VF_OFFLOAD_IWARP
437 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ		BIT(3)
438 #define VIRTCHNL_VF_OFFLOAD_RSS_REG		BIT(4)
439 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		BIT(5)
440 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
441 /* used to negotiate communicating link speeds in Mbps */
442 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
443 	/* BIT(8) is reserved */
444 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
445 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
446 #define VIRTCHNL_VF_OFFLOAD_FSUB_PF		BIT(14)
447 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
448 #define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
449 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
450 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	BIT(18)
451 #define VIRTCHNL_VF_OFFLOAD_RSS_PF		BIT(19)
452 #define VIRTCHNL_VF_OFFLOAD_ENCAP		BIT(20)
453 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		BIT(21)
454 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	BIT(22)
455 #define VIRTCHNL_VF_OFFLOAD_ADQ			BIT(23)
456 #define VIRTCHNL_VF_OFFLOAD_ADQ_V2		BIT(24)
457 #define VIRTCHNL_VF_OFFLOAD_USO			BIT(25)
458 	/* BIT(26) is reserved */
459 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		BIT(27)
460 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
461 #define VIRTCHNL_VF_OFFLOAD_QOS			BIT(29)
462 	/* BIT(30) is reserved */
463 	/* BIT(31) is reserved */
464 
465 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
466 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
467 			       VIRTCHNL_VF_OFFLOAD_RSS_PF)
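
/* Illustrative capability check (a sketch, not part of this interface): after
 * VIRTCHNL_OP_GET_VF_RESOURCES completes, a VF driver would test the
 * negotiated flags before relying on an optional feature, e.g.:
 *
 *	if (vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
 *		use_vlan_v2_opcodes = true;
 *	else if (vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
 *		use_legacy_vlan_opcodes = true;
 */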
468 
469 struct virtchnl_vf_resource {
470 	u16 num_vsis;
471 	u16 num_queue_pairs;
472 	u16 max_vectors;
473 	u16 max_mtu;
474 
475 	u32 vf_cap_flags;
476 	u32 rss_key_size;
477 	u32 rss_lut_size;
478 
479 	struct virtchnl_vsi_resource vsi_res[1];
480 };
481 
482 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
483 
484 /* VIRTCHNL_OP_CONFIG_TX_QUEUE
485  * VF sends this message to set up parameters for one TX queue.
486  * External data buffer contains one instance of virtchnl_txq_info.
487  * PF configures requested queue and returns a status code.
488  */
489 
490 /* Tx queue config info */
491 struct virtchnl_txq_info {
492 	u16 vsi_id;
493 	u16 queue_id;
494 	u16 ring_len;		/* number of descriptors, multiple of 8 */
495 	u16 headwb_enabled; /* deprecated with AVF 1.0 */
496 	u64 dma_ring_addr;
497 	u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
498 };
499 
500 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
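
/* Illustrative sketch (hypothetical values, not part of this interface): a VF
 * driver filling one TX queue description before sending
 * VIRTCHNL_OP_CONFIG_TX_QUEUE.
 *
 *	struct virtchnl_txq_info txq = { 0 };
 *
 *	txq.vsi_id = vsi_id;
 *	txq.queue_id = 0;
 *	txq.ring_len = 512;			(multiple of 8)
 *	txq.dma_ring_addr = tx_ring_physaddr;	(DMA address of the ring)
 */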
501 
502 /* RX descriptor IDs (range from 0 to 63) */
503 enum virtchnl_rx_desc_ids {
504 	VIRTCHNL_RXDID_0_16B_BASE		= 0,
505 	VIRTCHNL_RXDID_1_32B_BASE		= 1,
506 	VIRTCHNL_RXDID_2_FLEX_SQ_NIC		= 2,
507 	VIRTCHNL_RXDID_3_FLEX_SQ_SW		= 3,
508 	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB	= 4,
509 	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL	= 5,
510 	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2		= 6,
511 	VIRTCHNL_RXDID_7_HW_RSVD		= 7,
512 	/* 8 through 15 are reserved */
513 	VIRTCHNL_RXDID_16_COMMS_GENERIC 	= 16,
514 	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN 	= 17,
515 	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 	= 18,
516 	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 	= 19,
517 	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW 	= 20,
518 	VIRTCHNL_RXDID_21_COMMS_AUX_TCP 	= 21,
519 	/* 22 through 63 are reserved */
520 };
521 
522 /* RX descriptor ID bitmasks */
523 enum virtchnl_rx_desc_id_bitmasks {
524 	VIRTCHNL_RXDID_0_16B_BASE_M		= BIT(VIRTCHNL_RXDID_0_16B_BASE),
525 	VIRTCHNL_RXDID_1_32B_BASE_M		= BIT(VIRTCHNL_RXDID_1_32B_BASE),
526 	VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M		= BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
527 	VIRTCHNL_RXDID_3_FLEX_SQ_SW_M		= BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
528 	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M	= BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
529 	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M	= BIT(VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL),
530 	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M	= BIT(VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2),
531 	VIRTCHNL_RXDID_7_HW_RSVD_M		= BIT(VIRTCHNL_RXDID_7_HW_RSVD),
532 	/* 8 through 15 are reserved */
533 	VIRTCHNL_RXDID_16_COMMS_GENERIC_M	= BIT(VIRTCHNL_RXDID_16_COMMS_GENERIC),
534 	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M	= BIT(VIRTCHNL_RXDID_17_COMMS_AUX_VLAN),
535 	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M	= BIT(VIRTCHNL_RXDID_18_COMMS_AUX_IPV4),
536 	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M	= BIT(VIRTCHNL_RXDID_19_COMMS_AUX_IPV6),
537 	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M	= BIT(VIRTCHNL_RXDID_20_COMMS_AUX_FLOW),
538 	VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M	= BIT(VIRTCHNL_RXDID_21_COMMS_AUX_TCP),
539 	/* 22 through 63 are reserved */
540 };
541 
542 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
543  * VF sends this message to set up parameters for one RX queue.
544  * External data buffer contains one instance of virtchnl_rxq_info.
545  * PF configures requested queue and returns a status code. The
546  * crc_disable flag disables CRC stripping on the VF. Setting
547  * the crc_disable flag to 1 will disable CRC stripping for each
548  * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
549  * offload must have been set prior to sending this info or the PF
550  * will ignore the request. This flag should be set the same for
551  * all of the queues for a VF.
552  */
553 
554 /* Rx queue config info */
555 struct virtchnl_rxq_info {
556 	u16 vsi_id;
557 	u16 queue_id;
558 	u32 ring_len;		/* number of descriptors, multiple of 32 */
559 	u16 hdr_size;
560 	u16 splithdr_enabled; /* deprecated with AVF 1.0 */
561 	u32 databuffer_size;
562 	u32 max_pkt_size;
563 	u8 crc_disable;
564 	u8 pad1[3];
565 	u64 dma_ring_addr;
566 
567 	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
568 	s32 rx_split_pos;
569 	u32 pad2;
570 };
571 
572 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
573 
574 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES
575  * VF sends this message to set parameters for active TX and RX queues
576  * associated with the specified VSI.
577  * PF configures queues and returns status.
578  * If the number of queues specified is greater than the number of queues
579  * associated with the VSI, an error is returned and no queues are configured.
580  * NOTE: The VF is not required to configure all queues in a single request.
581  * It may send multiple messages. PF drivers must correctly handle all VF
582  * requests.
583  */
584 struct virtchnl_queue_pair_info {
585 	/* NOTE: vsi_id and queue_id should be identical for both queues. */
586 	struct virtchnl_txq_info txq;
587 	struct virtchnl_rxq_info rxq;
588 };
589 
590 VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
591 
592 struct virtchnl_vsi_queue_config_info {
593 	u16 vsi_id;
594 	u16 num_queue_pairs;
595 	u32 pad;
596 	struct virtchnl_queue_pair_info qpair[1];
597 };
598 
599 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
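
/* Illustrative sizing sketch (an assumption about buffer layout, not a
 * definitive rule): because qpair[] is declared with one element but carries
 * num_queue_pairs entries, the message buffer is typically sized as
 *
 *	len = sizeof(struct virtchnl_vsi_queue_config_info) +
 *	      (num_queue_pairs - 1) * sizeof(struct virtchnl_queue_pair_info);
 */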
600 
601 /* VIRTCHNL_OP_REQUEST_QUEUES
602  * VF sends this message to request the PF to allocate additional queues to
603  * this VF.  Each VF gets a guaranteed number of queues on init but asking for
604  * additional queues must be negotiated.  This is a best effort request as it
605  * is possible the PF does not have enough queues left to support the request.
606  * If the PF cannot support the number requested it will respond with the
607  * maximum number it is able to support.  If the request is successful, PF will
608  * then reset the VF to institute required changes.
609  */
610 
611 /* VF resource request */
612 struct virtchnl_vf_res_request {
613 	u16 num_queue_pairs;
614 };
615 
616 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
617  * VF uses this message to map vectors to queues.
618  * The rxq_map and txq_map fields are bitmaps used to indicate which queues
619  * are to be associated with the specified vector.
620  * The "other" causes are always mapped to vector 0. The VF may not request
621  * that vector 0 be used for traffic.
622  * PF configures interrupt mapping and returns status.
623  * NOTE: due to hardware requirements, all active queues (both TX and RX)
624  * should be mapped to interrupts, even if the driver intends to operate
625  * only in polling mode. In this case the interrupt may be disabled, but
626  * the ITR timer will still run to trigger writebacks.
627  */
628 struct virtchnl_vector_map {
629 	u16 vsi_id;
630 	u16 vector_id;
631 	u16 rxq_map;
632 	u16 txq_map;
633 	u16 rxitr_idx;
634 	u16 txitr_idx;
635 };
636 
637 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
638 
639 struct virtchnl_irq_map_info {
640 	u16 num_vectors;
641 	struct virtchnl_vector_map vecmap[1];
642 };
643 
644 VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
645 
646 /* VIRTCHNL_OP_ENABLE_QUEUES
647  * VIRTCHNL_OP_DISABLE_QUEUES
648  * VF sends these messages to enable or disable TX/RX queue pairs.
649  * The queues fields are bitmaps indicating which queues to act upon.
650  * (Currently, we only support 16 queues per VF, but we make the field
651  * u32 to allow for expansion.)
652  * PF performs requested action and returns status.
653  * NOTE: The VF is not required to enable/disable all queues in a single
654  * request. It may send multiple messages.
655  * PF drivers must correctly handle all VF requests.
656  */
657 struct virtchnl_queue_select {
658 	u16 vsi_id;
659 	u16 pad;
660 	u32 rx_queues;
661 	u32 tx_queues;
662 };
663 
664 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
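
/* Illustrative sketch (hypothetical values, not part of this interface):
 * enabling the first four TX/RX queue pairs via VIRTCHNL_OP_ENABLE_QUEUES.
 *
 *	struct virtchnl_queue_select vqs = { 0 };
 *
 *	vqs.vsi_id = vsi_id;
 *	vqs.rx_queues = 0xF;	(bitmap: queues 0-3)
 *	vqs.tx_queues = 0xF;
 */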
665 
666 /* VIRTCHNL_OP_GET_MAX_RSS_QREGION
667  *
668  * if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
669  * then this op must be supported.
670  *
671  * VF sends this message in order to query the max RSS queue region
672  * size supported by PF, when VIRTCHNL_VF_LARGE_NUM_QPAIRS is enabled.
673  * This information should be used when configuring the RSS LUT and/or
674  * configuring queue region based filters.
675  *
676  * The maximum RSS queue region is 2^qregion_width. So, a qregion_width
677  * of 6 would inform the VF that the PF supports a maximum RSS queue region
678  * of 64.
679  *
680  * A queue region represents a range of queues that can be used to configure
681  * a RSS LUT. For example, if a VF is given 64 queues, but only a max queue
682  * region size of 16 (i.e. 2^qregion_width = 16) then it will only be able
683  * to configure the RSS LUT with queue indices from 0 to 15. However, other
684  * filters can be used to direct packets to queues >15 by specifying a queue
685  * base/offset and queue region width.
686  */
687 struct virtchnl_max_rss_qregion {
688 	u16 vport_id;
689 	u16 qregion_width;
690 	u8 pad[4];
691 };
692 
693 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_max_rss_qregion);
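
/* Illustrative sketch (not a definitive rule): converting the reported width
 * into a queue count, e.g. a qregion_width of 6 yields 64 usable RSS queues.
 *
 *	u16 max_rss_queues = 1 << qregion_width;
 */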
694 
695 /* VIRTCHNL_OP_ADD_ETH_ADDR
696  * VF sends this message in order to add one or more unicast or multicast
697  * address filters for the specified VSI.
698  * PF adds the filters and returns status.
699  */
700 
701 /* VIRTCHNL_OP_DEL_ETH_ADDR
702  * VF sends this message in order to remove one or more unicast or multicast
703  * filters for the specified VSI.
704  * PF removes the filters and returns status.
705  */
706 
707 /* VIRTCHNL_ETHER_ADDR_LEGACY
708  * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
709  * bytes. Moving forward all VF drivers should not set type to
710  * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
711  * behavior. The control plane function (i.e. PF) can use a best effort method
712  * of tracking the primary/device unicast in this case, but there is no
713  * guarantee and functionality depends on the implementation of the PF.
714  */
715 
716 /* VIRTCHNL_ETHER_ADDR_PRIMARY
717  * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
718  * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
719  * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
720  * function (i.e. PF) to accurately track and use this MAC address for
721  * displaying on the host and for VM/function reset.
722  */
723 
724 /* VIRTCHNL_ETHER_ADDR_EXTRA
725  * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
726  * unicast and/or multicast filters that are being added/deleted via
727  * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
728  */
729 struct virtchnl_ether_addr {
730 	u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
731 	u8 type;
732 #define VIRTCHNL_ETHER_ADDR_LEGACY	0
733 #define VIRTCHNL_ETHER_ADDR_PRIMARY	1
734 #define VIRTCHNL_ETHER_ADDR_EXTRA	2
735 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK	3 /* first two bits of type are valid */
736 	u8 pad;
737 };
738 
739 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
740 
741 struct virtchnl_ether_addr_list {
742 	u16 vsi_id;
743 	u16 num_elements;
744 	struct virtchnl_ether_addr list[1];
745 };
746 
747 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
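
/* Illustrative sketch (hypothetical buffer handling, not part of this
 * interface): adding the primary unicast MAC filter via
 * VIRTCHNL_OP_ADD_ETH_ADDR.
 *
 *	list->vsi_id = vsi_id;
 *	list->num_elements = 1;
 *	memcpy(list->list[0].addr, mac, VIRTCHNL_ETH_LENGTH_OF_ADDRESS);
 *	list->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
 */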
748 
749 /* VIRTCHNL_OP_ADD_VLAN
750  * VF sends this message to add one or more VLAN tag filters for receives.
751  * PF adds the filters and returns status.
752  * If a port VLAN is configured by the PF, this operation will return an
753  * error to the VF.
754  */
755 
756 /* VIRTCHNL_OP_DEL_VLAN
757  * VF sends this message to remove one or more VLAN tag filters for receives.
758  * PF removes the filters and returns status.
759  * If a port VLAN is configured by the PF, this operation will return an
760  * error to the VF.
761  */
762 
763 struct virtchnl_vlan_filter_list {
764 	u16 vsi_id;
765 	u16 num_elements;
766 	u16 vlan_id[1];
767 };
768 
769 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
770 
771 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
772  * structures and opcodes.
773  *
774  * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
775  * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
776  *
777  * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
778  * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
779  * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
780  *
781  * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
782  * by the PF concurrently. For example, if the PF can support
783  * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
784  * would OR the following bits:
785  *
786  *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
787  *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
788  *	VIRTCHNL_VLAN_ETHERTYPE_AND;
789  *
790  * The VF would interpret this as VLAN filtering can be supported on both 0x8100
791  * and 0x88A8 VLAN ethertypes.
792  *
793  * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be supported
794  * by the PF concurrently. For example if the PF can support
795  * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
796  * offload it would OR the following bits:
797  *
798  *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
799  *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
800  *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
801  *
802  * The VF would interpret this as VLAN stripping can be supported on either
803  * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
804  * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
805  * the previously set value.
806  *
807  * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
808  * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
809  *
810  * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
811  * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
812  *
813  * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
814  * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
815  *
816  * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
817  * VLAN filtering if the underlying PF supports it.
818  *
819  * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
820  * certain VLAN capability can be toggled. For example if the underlying PF/CP
821  * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
822  * set this bit along with the supported ethertypes.
823  */
824 enum virtchnl_vlan_support {
825 	VIRTCHNL_VLAN_UNSUPPORTED =		0,
826 	VIRTCHNL_VLAN_ETHERTYPE_8100 =		0x00000001,
827 	VIRTCHNL_VLAN_ETHERTYPE_88A8 =		0x00000002,
828 	VIRTCHNL_VLAN_ETHERTYPE_9100 =		0x00000004,
829 	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 =	0x00000100,
830 	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 =	0x00000200,
831 	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 =	0x00000400,
832 	VIRTCHNL_VLAN_PRIO =			0x01000000,
833 	VIRTCHNL_VLAN_FILTER_MASK =		0x10000000,
834 	VIRTCHNL_VLAN_ETHERTYPE_AND =		0x20000000,
835 	VIRTCHNL_VLAN_ETHERTYPE_XOR =		0x40000000,
836 	VIRTCHNL_VLAN_TOGGLE =			0x80000000
837 };
838 
839 /* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
840  * for filtering, insertion, and stripping capabilities.
841  *
842  * If only outer capabilities are supported (for filtering, insertion, and/or
843  * stripping) then this refers to the outer most or single VLAN from the VF's
844  * perspective.
845  *
846  * If only inner capabilities are supported (for filtering, insertion, and/or
847  * stripping) then this refers to the outer most or single VLAN from the VF's
848  * perspective. Functionally this is the same as if only outer capabilities are
849  * supported. The VF driver is just forced to use the inner fields when
850  * adding/deleting filters and enabling/disabling offloads (if supported).
851  *
852  * If both outer and inner capabilities are supported (for filtering, insertion,
853  * and/or stripping) then outer refers to the outer most or single VLAN and
854  * inner refers to the second VLAN, if it exists, in the packet.
855  *
856  * There is no support for tunneled VLAN offloads, so outer or inner are never
857  * referring to a tunneled packet from the VF's perspective.
858  */
859 struct virtchnl_vlan_supported_caps {
860 	u32 outer;
861 	u32 inner;
862 };
863 
864 /* The PF populates these fields based on the supported VLAN filtering. If a
865  * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
866  * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
867  * the unsupported fields.
868  *
869  * Also, a VF is only allowed to toggle its VLAN filtering setting if the
870  * VIRTCHNL_VLAN_TOGGLE bit is set.
871  *
872  * The ethertype(s) specified in the ethertype_init field are the ethertypes
873  * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
874  * most VLAN from the VF's perspective. If both inner and outer filtering are
875  * allowed then ethertype_init only refers to the outer most VLAN, as the only
876  * VLAN ethertype supported for inner VLAN filtering is
877  * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
878  * when both inner and outer filtering are allowed.
879  *
880  * The max_filters field tells the VF how many VLAN filters it's allowed to have
881  * at any one time. If it exceeds this amount and tries to add another filter,
882  * then the request will be rejected by the PF. To prevent failures, the VF
883  * should keep track of how many VLAN filters it has added and not attempt to
884  * add more than max_filters.
885  */
886 struct virtchnl_vlan_filtering_caps {
887 	struct virtchnl_vlan_supported_caps filtering_support;
888 	u32 ethertype_init;
889 	u16 max_filters;
890 	u8 pad[2];
891 };
892 
893 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
894 
895 /* This enum is used for the virtchnl_vlan_offload_caps structure to specify
896  * if the PF supports a different ethertype for stripping and insertion.
897  *
898  * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
899  * for stripping affect the ethertype(s) specified for insertion and vice versa
900  * as well. If the VF tries to configure VLAN stripping via
901  * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
902  * that will be the ethertype for both stripping and insertion.
903  *
904  * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
905  * stripping do not affect the ethertype(s) specified for insertion and vice
906  * versa.
907  */
908 enum virtchnl_vlan_ethertype_match {
909 	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
910 	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
911 };
912 
913 /* The PF populates these fields based on the supported VLAN offloads. If a
914  * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
915  * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
916  * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
917  *
918  * Also, a VF is only allowed to toggle its VLAN offload setting if the
919  * VIRTCHNL_VLAN_TOGGLE bit is set.
920  *
921  * The VF driver needs to be aware of how the tags are stripped by hardware and
922  * inserted by the VF driver based on the level of offload support. The PF will
923  * populate these fields based on where the VLAN tags are expected to be
924  * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
925  * interpret these fields. See the definition of the
926  * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
927  * enumeration.
928  */
929 struct virtchnl_vlan_offload_caps {
930 	struct virtchnl_vlan_supported_caps stripping_support;
931 	struct virtchnl_vlan_supported_caps insertion_support;
932 	u32 ethertype_init;
933 	u8 ethertype_match;
934 	u8 pad[3];
935 };
936 
937 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
938 
939 /* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
940  * VF sends this message to determine its VLAN capabilities.
941  *
942  * PF will mark which capabilities it supports based on hardware support and
943  * current configuration. For example, if a port VLAN is configured the PF will
944  * not allow outer VLAN filtering, stripping, or insertion to be configured so
945  * it will block these features from the VF.
946  *
947  * The VF will need to cross reference its capabilities with the PFs
948  * capabilities in the response message from the PF to determine the VLAN
949  * support.
950  */
951 struct virtchnl_vlan_caps {
952 	struct virtchnl_vlan_filtering_caps filtering;
953 	struct virtchnl_vlan_offload_caps offloads;
954 };
955 
956 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
957 
958 struct virtchnl_vlan {
959 	u16 tci;	/* tci[15:13] = PCP and tci[11:0] = VID */
960 	u16 tci_mask;	/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
961 			 * filtering caps
962 			 */
963 	u16 tpid;	/* 0x8100, 0x88a8, etc. and only type(s) set in
964 			 * filtering caps. Note that tpid here does not refer to
965 			 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
966 			 * actual 2-byte VLAN TPID
967 			 */
968 	u8 pad[2];
969 };
970 
971 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
972 
973 struct virtchnl_vlan_filter {
974 	struct virtchnl_vlan inner;
975 	struct virtchnl_vlan outer;
976 	u8 pad[16];
977 };
978 
979 VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
980 
981 /* VIRTCHNL_OP_ADD_VLAN_V2
982  * VIRTCHNL_OP_DEL_VLAN_V2
983  *
984  * VF sends these messages to add/del one or more VLAN tag filters for Rx
985  * traffic.
986  *
987  * The PF attempts to add the filters and returns status.
988  *
989  * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
990  * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
991  */
992 struct virtchnl_vlan_filter_list_v2 {
993 	u16 vport_id;
994 	u16 num_elements;
995 	u8 pad[4];
996 	struct virtchnl_vlan_filter filters[1];
997 };
998 
999 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
1000 
1001 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
1002  * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
1003  * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
1004  * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
1005  *
1006  * VF sends this message to enable or disable VLAN stripping or insertion. It
1007  * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
1008  * allowed and whether or not it's allowed to enable/disable the specific
1009  * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
1010  * parse the virtchnl_vlan_caps.offloads fields to determine which offload
1011  * messages are allowed.
1012  *
1013  * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
1014  * following manner the VF will be allowed to enable and/or disable 0x8100 inner
1015  * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
1016  * case means the outer most or single VLAN from the VF's perspective. This is
1017  * because no outer offloads are supported. See the comments above the
1018  * virtchnl_vlan_supported_caps structure for more details.
1019  *
1020  * virtchnl_vlan_caps.offloads.stripping_support.inner =
1021  *			VIRTCHNL_VLAN_TOGGLE |
1022  *			VIRTCHNL_VLAN_ETHERTYPE_8100;
1023  *
1024  * virtchnl_vlan_caps.offloads.insertion_support.inner =
1025  *			VIRTCHNL_VLAN_TOGGLE |
1026  *			VIRTCHNL_VLAN_ETHERTYPE_8100;
1027  *
1028  * In order to enable inner (again note that in this case inner is the outer
1029  * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
1030  * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
1031  * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
1032  *
1033  * virtchnl_vlan_setting.inner_ethertype_setting =
1034  *			VIRTCHNL_VLAN_ETHERTYPE_8100;
1035  *
1036  * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
1037  * initialization.
1038  *
1039  * The reason that VLAN TPID(s) are not being used for the
1040  * outer_ethertype_setting and inner_ethertype_setting fields is because it's
1041  * possible a device could support VLAN insertion and/or stripping offload on
1042  * multiple ethertypes concurrently, so this method allows a VF to request
1043  * multiple ethertypes in one message using the virtchnl_vlan_support
1044  * enumeration.
1045  *
1046  * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
1047  * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
1048  * VLAN insertion and stripping simultaneously. The
1049  * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
1050  * populated based on what the PF can support.
1051  *
1052  * virtchnl_vlan_caps.offloads.stripping_support.outer =
1053  *			VIRTCHNL_VLAN_TOGGLE |
1054  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
1055  *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
1056  *			VIRTCHNL_VLAN_ETHERTYPE_AND;
1057  *
1058  * virtchnl_vlan_caps.offloads.insertion_support.outer =
1059  *			VIRTCHNL_VLAN_TOGGLE |
1060  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
1061  *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
1062  *			VIRTCHNL_VLAN_ETHERTYPE_AND;
1063  *
1064  * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
1065  * would populate the virtchnl_vlan_setting structure in the following manner
1066  * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
1067  *
1068  * virtchnl_vlan_setting.outer_ethertype_setting =
1069  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
1070  *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
1071  *
1072  * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
1073  * initialization.
1074  *
1075  * There is also the case where a PF and the underlying hardware can support
1076  * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
1077  * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
1078  * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
1079  * offloads. The ethertypes must match for stripping and insertion.
1080  *
1081  * virtchnl_vlan_caps.offloads.stripping_support.outer =
1082  *			VIRTCHNL_VLAN_TOGGLE |
1083  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
1084  *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
1085  *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
1086  *
1087  * virtchnl_vlan_caps.offloads.insertion_support.outer =
1088  *			VIRTCHNL_VLAN_TOGGLE |
1089  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
1090  *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
1091  *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
1092  *
1093  * virtchnl_vlan_caps.offloads.ethertype_match =
1094  *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
1095  *
1096  * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
1097  * populate the virtchnl_vlan_setting structure in the following manner and send
1098  * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
1099  * ethertype for VLAN insertion if it's enabled. So, for completeness, a
1100  * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
1101  *
1102  * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
1103  *
1104  * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
1105  * initialization.
1106  *
1107  * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2
1108  * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2
1109  *
1110  * VF sends this message to enable or disable VLAN filtering. It also needs to
1111  * specify an ethertype. The VF knows which VLAN ethertypes are allowed and
1112  * whether or not it's allowed to enable/disable filtering via the
1113  * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
1114  * parse the virtchnl_vlan_caps.filtering fields to determine which, if any,
1115  * filtering messages are allowed.
1116  *
1117  * For example, if the PF populates the virtchnl_vlan_caps.filtering in the
1118  * following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8
1119  * outer VLAN filtering together. Note that VIRTCHNL_VLAN_ETHERTYPE_AND
1120  * means that all filtering ethertypes will be enabled and disabled together
1121  * regardless of the request from the VF. This means that the underlying
1122  * hardware only supports VLAN filtering for all of the specified ethertypes
1123  * or none of them.
1124  *
1125  * virtchnl_vlan_caps.filtering.filtering_support.outer =
1126  *			VIRTCHNL_VLAN_TOGGLE |
1127  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
1128  *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
1129  *			VIRTCHNL_VLAN_ETHERTYPE_9100 |
1130  *			VIRTCHNL_VLAN_ETHERTYPE_AND;
1131  *
1132  * In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100
1133  * VLANs aren't supported by the VF driver), the VF would populate the
1134  * virtchnl_vlan_setting structure in the following manner and send the
1135  * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. The same message format would be used
1136  * to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the
1137  * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.
1138  *
1139  * virtchnl_vlan_setting.outer_ethertype_setting =
1140  *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
1141  *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
1142  *
1143  */
1144 struct virtchnl_vlan_setting {
1145 	u32 outer_ethertype_setting;
1146 	u32 inner_ethertype_setting;
1147 	u16 vport_id;
1148 	u8 pad[6];
1149 };
1150 
1151 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
1152 
1153 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
1154  * VF sends VSI id and flags.
1155  * PF returns status code in retval.
1156  * Note: we assume that broadcast accept mode is always enabled.
1157  */
1158 struct virtchnl_promisc_info {
1159 	u16 vsi_id;
1160 	u16 flags;
1161 };
1162 
1163 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
1164 
1165 #define FLAG_VF_UNICAST_PROMISC	0x00000001
1166 #define FLAG_VF_MULTICAST_PROMISC	0x00000002
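
/* Illustrative sketch (not part of this interface): requesting both unicast
 * and multicast promiscuous mode for a VSI.
 *
 *	struct virtchnl_promisc_info info;
 *
 *	info.vsi_id = vsi_id;
 *	info.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
 */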
1167 
1168 /* VIRTCHNL_OP_GET_STATS
1169  * VF sends this message to request stats for the selected VSI. VF uses
1170  * the virtchnl_queue_select struct to specify the VSI. The queue_id
1171  * field is ignored by the PF.
1172  *
1173  * PF replies with struct virtchnl_eth_stats in an external buffer.
1174  */
1175 
1176 struct virtchnl_eth_stats {
1177 	u64 rx_bytes;			/* received bytes */
1178 	u64 rx_unicast;			/* received unicast pkts */
1179 	u64 rx_multicast;		/* received multicast pkts */
1180 	u64 rx_broadcast;		/* received broadcast pkts */
1181 	u64 rx_discards;
1182 	u64 rx_unknown_protocol;
1183 	u64 tx_bytes;			/* transmitted bytes */
1184 	u64 tx_unicast;			/* transmitted unicast pkts */
1185 	u64 tx_multicast;		/* transmitted multicast pkts */
1186 	u64 tx_broadcast;		/* transmitted broadcast pkts */
1187 	u64 tx_discards;
1188 	u64 tx_errors;
1189 };
1190 
1191 /* VIRTCHNL_OP_CONFIG_RSS_KEY
1192  * VIRTCHNL_OP_CONFIG_RSS_LUT
1193  * VF sends these messages to configure RSS. Only supported if both PF
1194  * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
1195  * configuration negotiation. If this is the case, then the RSS fields in
1196  * the VF resource struct are valid.
1197  * Both the key and LUT are initialized to 0 by the PF, meaning that
1198  * RSS is effectively disabled until set up by the VF.
1199  */
1200 struct virtchnl_rss_key {
1201 	u16 vsi_id;
1202 	u16 key_len;
1203 	u8 key[1];         /* RSS hash key, packed bytes */
1204 };
1205 
1206 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
1207 
1208 struct virtchnl_rss_lut {
1209 	u16 vsi_id;
1210 	u16 lut_entries;
1211 	u8 lut[1];        /* RSS lookup table */
1212 };
1213 
1214 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
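
/* Illustrative sizing sketch (an assumption about buffer layout, not a
 * definitive rule): as with other variable-length messages, the buffers are
 * typically sized as
 *
 *	key_msg_len = sizeof(struct virtchnl_rss_key) + key_len - 1;
 *	lut_msg_len = sizeof(struct virtchnl_rss_lut) + lut_entries - 1;
 */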
1215 
1216 /* enum virtchnl_hash_filter
1217  *
1218  * Bits defining the hash filters in the hena field of the virtchnl_rss_hena
1219  * structure. Each bit indicates a specific hash filter for RSS.
1220  *
1221  * Note that not all bits are supported on all hardware. The VF should use
1222  * VIRTCHNL_OP_GET_RSS_HENA_CAPS to determine which bits the PF supports
1223  * before using VIRTCHNL_OP_SET_RSS_HENA to enable specific filters.
1224  */
1225 enum virtchnl_hash_filter {
1226 	/* Bits 0 through 28 are reserved for future use */
1227 	/* Bits 29, 30, and 32 are not supported on XL710 and X710 */
1228 	VIRTCHNL_HASH_FILTER_UNICAST_IPV4_UDP		= 29,
1229 	VIRTCHNL_HASH_FILTER_MULTICAST_IPV4_UDP		= 30,
1230 	VIRTCHNL_HASH_FILTER_IPV4_UDP			= 31,
1231 	VIRTCHNL_HASH_FILTER_IPV4_TCP_SYN_NO_ACK	= 32,
1232 	VIRTCHNL_HASH_FILTER_IPV4_TCP			= 33,
1233 	VIRTCHNL_HASH_FILTER_IPV4_SCTP			= 34,
1234 	VIRTCHNL_HASH_FILTER_IPV4_OTHER			= 35,
1235 	VIRTCHNL_HASH_FILTER_FRAG_IPV4			= 36,
1236 	/* Bits 37 and 38 are reserved for future use */
1237 	/* Bits 39, 40, and 42 are not supported on XL710 and X710 */
1238 	VIRTCHNL_HASH_FILTER_UNICAST_IPV6_UDP		= 39,
1239 	VIRTCHNL_HASH_FILTER_MULTICAST_IPV6_UDP		= 40,
1240 	VIRTCHNL_HASH_FILTER_IPV6_UDP			= 41,
1241 	VIRTCHNL_HASH_FILTER_IPV6_TCP_SYN_NO_ACK	= 42,
1242 	VIRTCHNL_HASH_FILTER_IPV6_TCP			= 43,
1243 	VIRTCHNL_HASH_FILTER_IPV6_SCTP			= 44,
1244 	VIRTCHNL_HASH_FILTER_IPV6_OTHER			= 45,
1245 	VIRTCHNL_HASH_FILTER_FRAG_IPV6			= 46,
1246 	/* Bit 47 is reserved for future use */
1247 	VIRTCHNL_HASH_FILTER_FCOE_OX			= 48,
1248 	VIRTCHNL_HASH_FILTER_FCOE_RX			= 49,
1249 	VIRTCHNL_HASH_FILTER_FCOE_OTHER			= 50,
1250 	/* Bits 51 through 62 are reserved for future use */
1251 	VIRTCHNL_HASH_FILTER_L2_PAYLOAD			= 63,
1252 };
1253 
1254 #define VIRTCHNL_HASH_FILTER_INVALID	(0)
1255 
1256 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS
1257  * VIRTCHNL_OP_SET_RSS_HENA
1258  * VF sends these messages to get and set the hash filter enable bits for RSS.
1259  * By default, the PF sets these to all possible traffic types that the
1260  * hardware supports. The VF can query this value if it wants to change the
1261  * traffic types that are hashed by the hardware.
1262  */
1263 struct virtchnl_rss_hena {
1264 	/* see enum virtchnl_hash_filter */
1265 	u64 hena;
1266 };
1267 
1268 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
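
/* Illustrative sketch (hypothetical selection, not part of this interface):
 * enabling only TCP/IPv4 and TCP/IPv6 hashing via VIRTCHNL_OP_SET_RSS_HENA.
 *
 *	struct virtchnl_rss_hena hena = { 0 };
 *
 *	hena.hena = (1ULL << VIRTCHNL_HASH_FILTER_IPV4_TCP) |
 *		    (1ULL << VIRTCHNL_HASH_FILTER_IPV6_TCP);
 */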
1269 
1270 /* Type of RSS algorithm */
1271 enum virtchnl_rss_algorithm {
1272 	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
1273 	VIRTCHNL_RSS_ALG_R_ASYMMETRIC		= 1,
1274 	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
1275 	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC		= 3,
1276 };
1277 
1278 /* These are used by the PF driver to enforce how many channels can be
1279  * supported. When the ADQ_V2 capability is negotiated, up to 16 channels are
1280  * allowed; otherwise the PF driver allows a maximum of 4 channels.
1281  */
1282 #define VIRTCHNL_MAX_ADQ_CHANNELS 4
1283 #define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
1284 
1285 /* VIRTCHNL_OP_ENABLE_CHANNELS
1286  * VIRTCHNL_OP_DISABLE_CHANNELS
1287  * VF sends these messages to enable or disable channels based on
1288  * the user specified queue count and queue offset for each traffic class.
1289  * This struct encompasses all the information that the PF needs from
1290  * VF to create a channel.
1291  */
1292 struct virtchnl_channel_info {
1293 	u16 count; /* number of queues in a channel */
1294 	u16 offset; /* queues in a channel start from 'offset' */
1295 	u32 pad;
1296 	u64 max_tx_rate;
1297 };
1298 
1299 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
1300 
1301 struct virtchnl_tc_info {
1302 	u32	num_tc;
1303 	u32	pad;
1304 	struct	virtchnl_channel_info list[1];
1305 };
1306 
1307 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
1308 
1309 /* VIRTCHNL_OP_ADD_CLOUD_FILTER
1310  * VIRTCHNL_OP_DEL_CLOUD_FILTER
1311  * VF sends these messages to add or delete a cloud filter based on the
1312  * user specified match and action filters. These structures encompass
1313  * all the information that the PF needs from the VF to add/delete a
1314  * cloud filter.
1315  */
1316 
1317 struct virtchnl_l4_spec {
1318 	u8	src_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
1319 	u8	dst_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
1320 	/* vlan_prio is part of this 16-bit field, even from the OS perspective:
1321 	 * bits 11..0 carry the actual vlan_id and bits 14..12 carry vlan_prio.
1322 	 * In the future, if VLAN priority offload is added, that information will
1323 	 * be passed as part of the "vlan_id" field in bits 14..12.
1324 	 */
1325 	__be16	vlan_id;
1326 	__be16	pad; /* reserved for future use */
1327 	__be32	src_ip[4];
1328 	__be32	dst_ip[4];
1329 	__be16	src_port;
1330 	__be16	dst_port;
1331 };
1332 
1333 VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
1334 
1335 union virtchnl_flow_spec {
1336 	struct	virtchnl_l4_spec tcp_spec;
1337 	u8	buffer[128]; /* reserved for future use */
1338 };
1339 
1340 VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
1341 
1342 enum virtchnl_action {
1343 	/* action types */
1344 	VIRTCHNL_ACTION_DROP = 0,
1345 	VIRTCHNL_ACTION_TC_REDIRECT,
1346 	VIRTCHNL_ACTION_PASSTHRU,
1347 	VIRTCHNL_ACTION_QUEUE,
1348 	VIRTCHNL_ACTION_Q_REGION,
1349 	VIRTCHNL_ACTION_MARK,
1350 	VIRTCHNL_ACTION_COUNT,
1351 };
1352 
1353 enum virtchnl_flow_type {
1354 	/* flow types */
1355 	VIRTCHNL_TCP_V4_FLOW = 0,
1356 	VIRTCHNL_TCP_V6_FLOW,
1357 	VIRTCHNL_UDP_V4_FLOW,
1358 	VIRTCHNL_UDP_V6_FLOW,
1359 };
1360 
1361 struct virtchnl_filter {
1362 	union	virtchnl_flow_spec data;
1363 	union	virtchnl_flow_spec mask;
1364 
1365 	/* see enum virtchnl_flow_type */
1366 	s32 	flow_type;
1367 
1368 	/* see enum virtchnl_action */
1369 	s32	action;
1370 	u32	action_meta;
1371 	u8	field_flags;
1372 };
1373 
1374 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
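
/*
 * Example (editorial sketch): a cloud filter that redirects IPv4 TCP traffic
 * with destination port 5001 to traffic class 1 could be filled in as below.
 * htons() is assumed to be the usual host-to-big-endian helper, the target
 * traffic class in action_meta is illustrative, and vf_send_msg() is a
 * hypothetical send wrapper.
 *
 *	struct virtchnl_filter f = {0};
 *
 *	f.flow_type = VIRTCHNL_TCP_V4_FLOW;
 *	f.action = VIRTCHNL_ACTION_TC_REDIRECT;
 *	f.action_meta = 1;				// target TC (example)
 *	f.data.tcp_spec.dst_port = htons(5001);	// value to match
 *	f.mask.tcp_spec.dst_port = htons(0xFFFF);	// match on dst_port only
 *	vf_send_msg(VIRTCHNL_OP_ADD_CLOUD_FILTER, (u8 *)&f, sizeof(f));
 */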
1375 
1376 struct virtchnl_shaper_bw {
1377 	/* Unit is Kbps */
1378 	u32 committed;
1379 	u32 peak;
1380 };
1381 
1382 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
1383 
1384 /* VIRTCHNL_OP_EVENT
1385  * PF sends this message to inform the VF driver of events that may affect it.
1386  * No direct response is expected from the VF, though it may generate other
1387  * messages in response to this one.
1388  */
1389 enum virtchnl_event_codes {
1390 	VIRTCHNL_EVENT_UNKNOWN = 0,
1391 	VIRTCHNL_EVENT_LINK_CHANGE,
1392 	VIRTCHNL_EVENT_RESET_IMPENDING,
1393 	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
1394 };
1395 
1396 #define PF_EVENT_SEVERITY_INFO		0
1397 #define PF_EVENT_SEVERITY_ATTENTION	1
1398 #define PF_EVENT_SEVERITY_ACTION_REQUIRED	2
1399 #define PF_EVENT_SEVERITY_CERTAIN_DOOM	255
1400 
1401 struct virtchnl_pf_event {
1402 	/* see enum virtchnl_event_codes */
1403 	s32 event;
1404 	union {
1405 		/* If the PF driver does not support the new speed reporting
1406 		 * capabilities, use link_event; otherwise use link_event_adv
1407 		 * to get the speed and link information. Support for the new
1408 		 * speeds is indicated by the VIRTCHNL_VF_CAP_ADV_LINK_SPEED
1409 		 * flag in the vf_cap_flags field of the virtchnl_vf_resource
1410 		 * struct, which determines which link event struct to use
1411 		 * below.
1412 		 */
1413 		struct {
1414 			enum virtchnl_link_speed link_speed;
1415 			bool link_status;
1416 			u8 pad[3];
1417 		} link_event;
1418 		struct {
1419 			/* link_speed provided in Mbps */
1420 			u32 link_speed;
1421 			u8 link_status;
1422 			u8 pad[3];
1423 		} link_event_adv;
1424 	} event_data;
1425 
1426 	s32 severity;
1427 };
1428 
1429 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
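
/*
 * Example (editorial sketch): when a VIRTCHNL_EVENT_LINK_CHANGE event
 * arrives, the VF driver picks the union member according to whether it
 * negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED. msg is the received message
 * buffer; adv_link_speed_negotiated, link_up and speed_mbps are hypothetical
 * driver-side variables.
 *
 *	struct virtchnl_pf_event *pfe = (struct virtchnl_pf_event *)msg;
 *
 *	if (pfe->event == VIRTCHNL_EVENT_LINK_CHANGE) {
 *		if (adv_link_speed_negotiated) {
 *			link_up = pfe->event_data.link_event_adv.link_status;
 *			speed_mbps = pfe->event_data.link_event_adv.link_speed;
 *		} else {
 *			link_up = pfe->event_data.link_event.link_status;
 *			// link_speed here is an enum virtchnl_link_speed value
 *		}
 *	}
 */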
1430 
1431 /* VF reset states - these are written into the RSTAT register:
1432  * VFGEN_RSTAT on the VF
1433  * When the PF initiates a reset, it writes 0
1434  * When the reset is complete, it writes 1
1435  * When the PF detects that the VF has recovered, it writes 2
1436  * VF checks this register periodically to determine if a reset has occurred,
1437  * then polls it to know when the reset is complete.
1438  * If either the PF or VF reads the register while the hardware
1439  * is in a reset state, it will return 0xDEADBEEF, which, when masked,
1440  * results in 3.
1441  */
1442 enum virtchnl_vfr_states {
1443 	VIRTCHNL_VFR_INPROGRESS = 0,
1444 	VIRTCHNL_VFR_COMPLETED,
1445 	VIRTCHNL_VFR_VFACTIVE,
1446 };
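
/*
 * Example (editorial sketch): a VF driver polling for reset completion might
 * mask the register down to the state field before comparing; a two-bit
 * mask of 0x3 is assumed here, which also turns the in-reset 0xDEADBEEF
 * pattern into 3 as described above. rd32() and the VFGEN_RSTAT register
 * name are driver-specific placeholders.
 *
 *	u32 state = rd32(hw, VFGEN_RSTAT) & 0x3;
 *
 *	if (state == VIRTCHNL_VFR_COMPLETED || state == VIRTCHNL_VFR_VFACTIVE)
 *		return true;	// reset finished, safe to reinitialize
 */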
1447 
1448 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
1449 #define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK	16
1450 #define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
1451 #define PROTO_HDR_SHIFT			5
1452 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
1453 					(proto_hdr_type << PROTO_HDR_SHIFT)
1454 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
1455 
1456 /* The VF uses these macros to configure each protocol header.
1457  * Specify which protocol headers and protocol header fields to use, based
1458  * on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
1459  * @param hdr: a pointer to a struct virtchnl_proto_hdr
1460  * @param hdr_type: ETH/IPV4/TCP, etc
1461  * @param field: SRC/DST/TEID/SPI, etc
1462  */
1463 #define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
1464 	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
1465 #define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
1466 	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
1467 #define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
1468 	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
1469 #define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr)	((hdr)->field_selector)
1470 
1471 #define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1472 	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
1473 		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1474 #define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1475 	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
1476 		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1477 
1478 #define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
1479 	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
1480 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
1481 	(((hdr)->type) >> PROTO_HDR_SHIFT)
1482 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
1483 	((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
1484 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
1485 	(VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
1486 	 VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
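
/*
 * Example (editorial sketch): selecting the IPv4 source and destination
 * address fields of one protocol header with the macros above. The struct
 * virtchnl_proto_hdr used here is defined later in this file.
 *
 *	struct virtchnl_proto_hdr hdr = {0};
 *
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdr, IPV4);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdr, IPV4, SRC);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdr, IPV4, DST);
 *	// drop DST again if only the source address should be hashed/matched
 *	VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(&hdr, IPV4, DST);
 */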
1487 
1488 /* Protocol header type within a packet segment. A segment consists of one or
1489  * more protocol headers that make up a logical group. Each logical group of
1490  * protocol headers either encapsulates, or is encapsulated by, tunneling or
1491  * encapsulation protocols used for network virtualization.
1492  */
1493 enum virtchnl_proto_hdr_type {
1494 	VIRTCHNL_PROTO_HDR_NONE,
1495 	VIRTCHNL_PROTO_HDR_ETH,
1496 	VIRTCHNL_PROTO_HDR_S_VLAN,
1497 	VIRTCHNL_PROTO_HDR_C_VLAN,
1498 	VIRTCHNL_PROTO_HDR_IPV4,
1499 	VIRTCHNL_PROTO_HDR_IPV6,
1500 	VIRTCHNL_PROTO_HDR_TCP,
1501 	VIRTCHNL_PROTO_HDR_UDP,
1502 	VIRTCHNL_PROTO_HDR_SCTP,
1503 	VIRTCHNL_PROTO_HDR_GTPU_IP,
1504 	VIRTCHNL_PROTO_HDR_GTPU_EH,
1505 	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
1506 	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
1507 	VIRTCHNL_PROTO_HDR_PPPOE,
1508 	VIRTCHNL_PROTO_HDR_L2TPV3,
1509 	VIRTCHNL_PROTO_HDR_ESP,
1510 	VIRTCHNL_PROTO_HDR_AH,
1511 	VIRTCHNL_PROTO_HDR_PFCP,
1512 	VIRTCHNL_PROTO_HDR_GTPC,
1513 	VIRTCHNL_PROTO_HDR_ECPRI,
1514 	VIRTCHNL_PROTO_HDR_L2TPV2,
1515 	VIRTCHNL_PROTO_HDR_PPP,
1516 	/* IPv4 and IPv6 Fragment header types are only associated with
1517 	 * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
1518 	 * and cannot be used independently.
1519 	 */
1520 	VIRTCHNL_PROTO_HDR_IPV4_FRAG,
1521 	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
1522 	VIRTCHNL_PROTO_HDR_GRE,
1523 };
1524 
1525 /* Protocol header field within a protocol header. */
1526 enum virtchnl_proto_hdr_field {
1527 	/* ETHER */
1528 	VIRTCHNL_PROTO_HDR_ETH_SRC =
1529 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
1530 	VIRTCHNL_PROTO_HDR_ETH_DST,
1531 	VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
1532 	/* S-VLAN */
1533 	VIRTCHNL_PROTO_HDR_S_VLAN_ID =
1534 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
1535 	/* C-VLAN */
1536 	VIRTCHNL_PROTO_HDR_C_VLAN_ID =
1537 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
1538 	/* IPV4 */
1539 	VIRTCHNL_PROTO_HDR_IPV4_SRC =
1540 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
1541 	VIRTCHNL_PROTO_HDR_IPV4_DST,
1542 	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
1543 	VIRTCHNL_PROTO_HDR_IPV4_TTL,
1544 	VIRTCHNL_PROTO_HDR_IPV4_PROT,
1545 	VIRTCHNL_PROTO_HDR_IPV4_CHKSUM,
1546 	/* IPV6 */
1547 	VIRTCHNL_PROTO_HDR_IPV6_SRC =
1548 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
1549 	VIRTCHNL_PROTO_HDR_IPV6_DST,
1550 	VIRTCHNL_PROTO_HDR_IPV6_TC,
1551 	VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
1552 	VIRTCHNL_PROTO_HDR_IPV6_PROT,
1553 	/* IPV6 Prefix */
1554 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,
1555 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,
1556 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,
1557 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,
1558 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,
1559 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,
1560 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,
1561 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,
1562 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,
1563 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,
1564 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,
1565 	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,
1566 	/* TCP */
1567 	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
1568 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
1569 	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
1570 	VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
1571 	/* UDP */
1572 	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
1573 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
1574 	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
1575 	VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
1576 	/* SCTP */
1577 	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
1578 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
1579 	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
1580 	VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
1581 	/* GTPU_IP */
1582 	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
1583 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
1584 	/* GTPU_EH */
1585 	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
1586 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
1587 	VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
1588 	/* PPPOE */
1589 	VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
1590 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
1591 	/* L2TPV3 */
1592 	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
1593 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
1594 	/* ESP */
1595 	VIRTCHNL_PROTO_HDR_ESP_SPI =
1596 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
1597 	/* AH */
1598 	VIRTCHNL_PROTO_HDR_AH_SPI =
1599 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
1600 	/* PFCP */
1601 	VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
1602 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
1603 	VIRTCHNL_PROTO_HDR_PFCP_SEID,
1604 	/* GTPC */
1605 	VIRTCHNL_PROTO_HDR_GTPC_TEID =
1606 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
1607 	/* ECPRI */
1608 	VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
1609 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
1610 	VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
1611 	/* IPv4 Dummy Fragment */
1612 	VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
1613 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
1614 	/* IPv6 Extension Fragment */
1615 	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
1616 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
1617 	/* GTPU_DWN/UP */
1618 	VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
1619 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
1620 	VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
1621 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
1622 	/* L2TPv2 */
1623 	VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
1624 		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
1625 	VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
1626 };
1627 
1628 struct virtchnl_proto_hdr {
1629 	/* see enum virtchnl_proto_hdr_type */
1630 	s32 type;
1631 	u32 field_selector; /* a bit mask to select field for header type */
1632 	u8 buffer[64];
1633 	/**
1634 	 * binary buffer in network order for the specified header type.
1635 	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
1636 	 * header is expected to be copied into the buffer.
1637 	 */
1638 };
1639 
1640 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
1641 
1642 struct virtchnl_proto_hdr_w_msk {
1643 	/* see enum virtchnl_proto_hdr_type */
1644 	s32 type;
1645 	u32 pad;
1646 	/**
1647 	 * binary buffer in network order for the specified header type.
1648 	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
1649 	 * header is expected to be copied into the buffer.
1650 	 */
1651 	u8 buffer_spec[64];
1652 	/* binary buffer for bit-mask applied to specific header type */
1653 	u8 buffer_mask[64];
1654 };
1655 
1656 VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
1657 
1658 struct virtchnl_proto_hdrs {
1659 	u8 tunnel_level;
1660 	/**
1661 	 * Specifies where the protocol headers start from.
1662 	 * Must be 0 when sending a raw packet request.
1663 	 * 0 - from the outer layer
1664 	 * 1 - from the first inner layer
1665 	 * 2 - from the second inner layer
1666 	 * ....
1667 	 */
1668 	int count;
1669 	/**
1670 	 * count must be <=
1671 	 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
1672 	 * count = 0 :					select raw
1673 	 * 1 <= count <= VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr
1674 	 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr_w_msk
1675 	 * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
1676 	 */
1677 	union {
1678 		struct virtchnl_proto_hdr
1679 			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
1680 		struct virtchnl_proto_hdr_w_msk
1681 			proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
1682 		struct {
1683 			u16 pkt_len;
1684 			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1685 			u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1686 		} raw;
1687 	};
1688 };
1689 
1690 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
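
/*
 * Example (editorial sketch): assembling a virtchnl_proto_hdrs describing an
 * outer IPv4 + TCP lookup using the macros defined earlier. With count in
 * the 1..VIRTCHNL_MAX_NUM_PROTO_HDRS range the proto_hdr[] member of the
 * union is used; the resulting structure can then be embedded in
 * virtchnl_rss_cfg or virtchnl_fdir_rule.
 *
 *	struct virtchnl_proto_hdrs phs = {0};
 *
 *	phs.tunnel_level = 0;	// start from the outer layer
 *	phs.count = 2;
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&phs.proto_hdr[0], IPV4);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&phs.proto_hdr[0], IPV4, SRC);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&phs.proto_hdr[0], IPV4, DST);
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&phs.proto_hdr[1], TCP);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&phs.proto_hdr[1], TCP, SRC_PORT);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&phs.proto_hdr[1], TCP, DST_PORT);
 */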
1691 
1692 struct virtchnl_rss_cfg {
1693 	struct virtchnl_proto_hdrs proto_hdrs;	   /* protocol headers */
1694 
1695 	/* see enum virtchnl_rss_algorithm; rss algorithm type */
1696 	s32 rss_algorithm;
1697 	u8 reserved[128];                          /* reserved for future use */
1698 };
1699 
1700 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
1701 
1702 /* action configuration for FDIR and FSUB */
1703 struct virtchnl_filter_action {
1704 	/* see enum virtchnl_action type */
1705 	s32 type;
1706 	union {
1707 		/* used for queue and qgroup action */
1708 		struct {
1709 			u16 index;
1710 			u8 region;
1711 		} queue;
1712 		/* used for count action */
1713 		struct {
1714 			/* share counter ID with other flow rules */
1715 			u8 shared;
1716 			u32 id; /* counter ID */
1717 		} count;
1718 		/* used for mark action */
1719 		u32 mark_id;
1720 		u8 reserve[32];
1721 	} act_conf;
1722 };
1723 
1724 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1725 
1726 #define VIRTCHNL_MAX_NUM_ACTIONS  8
1727 
1728 struct virtchnl_filter_action_set {
1729 	/* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1730 	int count;
1731 	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1732 };
1733 
1734 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1735 
1736 /* pattern and action for FDIR rule */
1737 struct virtchnl_fdir_rule {
1738 	struct virtchnl_proto_hdrs proto_hdrs;
1739 	struct virtchnl_filter_action_set action_set;
1740 };
1741 
1742 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1743 
1744 /* Status returned to VF after VF requests FDIR commands
1745  * VIRTCHNL_FDIR_SUCCESS
1746  * The VF FDIR related request was successfully completed by the PF.
1747  * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
1748  *
1749  * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
1750  * OP_ADD_FDIR_FILTER request failed because no hardware resource is available.
1751  *
1752  * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
1753  * OP_ADD_FDIR_FILTER request failed because the rule already exists.
1754  *
1755  * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
1756  * OP_ADD_FDIR_FILTER request failed because it conflicts with an existing rule.
1757  *
1758  * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
1759  * OP_DEL_FDIR_FILTER request failed because the rule does not exist.
1760  *
1761  * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
1762  * OP_ADD_FDIR_FILTER request failed because parameter validation failed
1763  * or the rule is not supported by the HW.
1764  *
1765  * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
1766  * OP_ADD/DEL_FDIR_FILTER request failed because the programming
1767  * request timed out.
1768  *
1769  * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
1770  * OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
1771  * for example, the VF queried the counter of a rule that has no counter action.
1772  */
1773 enum virtchnl_fdir_prgm_status {
1774 	VIRTCHNL_FDIR_SUCCESS = 0,
1775 	VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1776 	VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1777 	VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1778 	VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1779 	VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1780 	VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1781 	VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1782 };
1783 
1784 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1785  * VF sends this request to PF by filling out vsi_id,
1786  * validate_only and rule_cfg. PF will return flow_id
1787  * if the request completes successfully, and will return the status to VF.
1788  */
1789 struct virtchnl_fdir_add {
1790 	u16 vsi_id;  /* INPUT */
1791 	/*
1792 	 * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
1793 	 * Validate and create share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.
1794 	 */
1795 	u16 validate_only; /* INPUT */
1796 	u32 flow_id;       /* OUTPUT */
1797 	struct virtchnl_fdir_rule rule_cfg; /* INPUT */
1798 
1799 	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
1800 	s32 status;
1801 };
1802 
1803 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
1804 
1805 /* VIRTCHNL_OP_DEL_FDIR_FILTER
1806  * VF sends this request to PF by filling out vsi_id
1807  * and flow_id. PF will return del_status to VF.
1808  */
1809 struct virtchnl_fdir_del {
1810 	u16 vsi_id;  /* INPUT */
1811 	u16 pad;
1812 	u32 flow_id; /* INPUT */
1813 
1814 	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
1815 	s32 status;
1816 };
1817 
1818 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
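
/*
 * Example (editorial sketch): typical FDIR rule lifecycle. The VF fills in a
 * virtchnl_fdir_add request, checks the returned status, remembers flow_id,
 * and later uses it in a virtchnl_fdir_del request. vf_vsi_id, rule (a
 * previously built virtchnl_fdir_rule) and vf_send_msg_wait() - a
 * hypothetical helper that sends the message and copies the PF response back
 * into the same buffer - are placeholders.
 *
 *	struct virtchnl_fdir_add add = {0};
 *	struct virtchnl_fdir_del del = {0};
 *
 *	add.vsi_id = vf_vsi_id;		// VF-relative VSI index
 *	add.validate_only = 0;		// actually program the rule
 *	add.rule_cfg = rule;		// previously built rule
 *	vf_send_msg_wait(VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)&add, sizeof(add));
 *	if (add.status != VIRTCHNL_FDIR_SUCCESS)
 *		return -1;
 *
 *	del.vsi_id = vf_vsi_id;
 *	del.flow_id = add.flow_id;	// returned by the PF on success
 *	vf_send_msg_wait(VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&del, sizeof(del));
 */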
1819 
1820 /* Status returned to VF after VF requests FSUB commands
1821  * VIRTCHNL_FSUB_SUCCESS
1822  * The VF FLOW related request was successfully completed by the PF.
1823  * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
1824  *
1825  * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
1826  * OP_FLOW_SUBSCRIBE request failed because no hardware resource is available.
1827  *
1828  * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
1829  * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
1830  *
1831  * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
1832  * OP_FLOW_UNSUBSCRIBE request failed because the rule does not exist.
1833  *
1834  * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
1835  * OP_FLOW_SUBSCRIBE request failed because parameter validation failed
1836  * or the rule is not supported by the HW.
1837  */
1838 enum virtchnl_fsub_prgm_status {
1839 	VIRTCHNL_FSUB_SUCCESS = 0,
1840 	VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
1841 	VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
1842 	VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
1843 	VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
1844 };
1845 
1846 /* VIRTCHNL_OP_FLOW_SUBSCRIBE
1847  * VF sends this request to PF by filling out vsi_id,
1848  * validate_only, priority, proto_hdrs and actions.
1849  * PF will return flow_id if the request completes successfully,
1850  * and will return the status to VF.
1851  */
1852 struct virtchnl_flow_sub {
1853 	u16 vsi_id; /* INPUT */
1854 	u8 validate_only; /* INPUT */
1855 	/* 0 is the highest priority; INPUT */
1856 	u8 priority;
1857 	u32 flow_id; /* OUTPUT */
1858 	struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
1859 	struct virtchnl_filter_action_set actions; /* INPUT */
1860 	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
1861 	s32 status;
1862 };
1863 
1864 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
1865 
1866 /* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
1867  * VF sends this request to PF by filling out vsi_id
1868  * and flow_id. PF will return status to VF.
1869  */
1870 struct virtchnl_flow_unsub {
1871 	u16 vsi_id; /* INPUT */
1872 	u16 pad;
1873 	u32 flow_id; /* INPUT */
1874 	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
1875 	s32 status;
1876 };
1877 
1878 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
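
/*
 * Example (editorial sketch): flow subscribe/unsubscribe follows the same
 * pattern as FDIR add/delete. proto_hdrs, actions, vf_vsi_id and
 * vf_send_msg_wait() are placeholders for values and helpers the driver
 * already has.
 *
 *	struct virtchnl_flow_sub sub = {0};
 *
 *	sub.vsi_id = vf_vsi_id;
 *	sub.validate_only = 0;
 *	sub.priority = 0;		// highest priority
 *	sub.proto_hdrs = proto_hdrs;	// previously built virtchnl_proto_hdrs
 *	sub.actions = actions;		// previously built action set
 *	vf_send_msg_wait(VIRTCHNL_OP_FLOW_SUBSCRIBE, (u8 *)&sub, sizeof(sub));
 *	// on success, sub.flow_id is used later in virtchnl_flow_unsub
 */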
1879 
1880 /* VIRTCHNL_OP_GET_QOS_CAPS
1881  * VF sends this message to get its QoS Caps, such as
1882  * TC number, Arbiter and Bandwidth.
1883  */
1884 struct virtchnl_qos_cap_elem {
1885 	u8 tc_num;
1886 	u8 tc_prio;
1887 #define VIRTCHNL_ABITER_STRICT      0
1888 #define VIRTCHNL_ABITER_ETS         2
1889 	u8 arbiter;
1890 #define VIRTCHNL_STRICT_WEIGHT      1
1891 	u8 weight;
1892 	enum virtchnl_bw_limit_type type;
1893 	union {
1894 		struct virtchnl_shaper_bw shaper;
1895 		u8 pad2[32];
1896 	};
1897 };
1898 
1899 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
1900 
1901 struct virtchnl_qos_cap_list {
1902 	u16 vsi_id;
1903 	u16 num_elem;
1904 	struct virtchnl_qos_cap_elem cap[1];
1905 };
1906 
1907 VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list);
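
/*
 * Example (editorial sketch): the VIRTCHNL_OP_GET_QOS_CAPS response carries
 * num_elem entries in the trailing cap[] array, so the VF walks it like a
 * flexible array. buf is assumed to point at the PF's response buffer.
 *
 *	struct virtchnl_qos_cap_list *caps = (struct virtchnl_qos_cap_list *)buf;
 *	u16 i;
 *
 *	for (i = 0; i < caps->num_elem; i++) {
 *		struct virtchnl_qos_cap_elem *e = &caps->cap[i];
 *		// consume e->tc_num, e->arbiter, e->weight, e->shaper, ...
 *	}
 */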
1908 
1909 /* VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP
1910  * VF sends the virtchnl_queue_tc_mapping message to set the queue-to-TC
1911  * mapping for all the Tx and Rx queues of a specified VSI, and
1912  * receives in response a bitmap of the valid user priorities
1913  * associated with the queues.
1914  */
1915 struct virtchnl_queue_tc_mapping {
1916 	u16 vsi_id;
1917 	u16 num_tc;
1918 	u16 num_queue_pairs;
1919 	u8 pad[2];
1920 	union {
1921 		struct {
1922 			u16 start_queue_id;
1923 			u16 queue_count;
1924 		} req;
1925 		struct {
1926 #define VIRTCHNL_USER_PRIO_TYPE_UP	0
1927 #define VIRTCHNL_USER_PRIO_TYPE_DSCP	1
1928 			u16 prio_type;
1929 			u16 valid_prio_bitmap;
1930 		} resp;
1931 	} tc[1];
1932 };
1933 
1934 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
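
/*
 * Example (editorial sketch): like virtchnl_tc_info, this message ends in a
 * one-element array and must be sized for num_tc entries. Two TCs covering
 * eight queue pairs might be requested as below; the PF reports the valid
 * user priorities back through the resp view of the union. vf_vsi_id,
 * malloc() and vf_send_msg() are placeholders.
 *
 *	struct virtchnl_queue_tc_mapping *map;
 *	u16 len;
 *
 *	len = sizeof(*map) + (2 - 1) * sizeof(map->tc[0]);
 *	map = malloc(len);
 *	memset(map, 0, len);
 *	map->vsi_id = vf_vsi_id;
 *	map->num_tc = 2;
 *	map->num_queue_pairs = 8;
 *	map->tc[0].req.start_queue_id = 0;
 *	map->tc[0].req.queue_count = 4;
 *	map->tc[1].req.start_queue_id = 4;
 *	map->tc[1].req.queue_count = 4;
 *	vf_send_msg(VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP, (u8 *)map, len);
 */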
1935 
1936 /* VIRTCHNL_OP_CONFIG_QUEUE_BW */
1937 struct virtchnl_queue_bw {
1938 	u16 queue_id;
1939 	u8 tc;
1940 	u8 pad;
1941 	struct virtchnl_shaper_bw shaper;
1942 };
1943 
1944 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
1945 
1946 struct virtchnl_queues_bw_cfg {
1947 	u16 vsi_id;
1948 	u16 num_queues;
1949 	struct virtchnl_queue_bw cfg[1];
1950 };
1951 
1952 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
1953 
1954 /* queue types */
1955 enum virtchnl_queue_type {
1956 	VIRTCHNL_QUEUE_TYPE_TX			= 0,
1957 	VIRTCHNL_QUEUE_TYPE_RX			= 1,
1958 };
1959 
1960 /* structure to specify a chunk of contiguous queues */
1961 struct virtchnl_queue_chunk {
1962 	/* see enum virtchnl_queue_type */
1963 	s32 type;
1964 	u16 start_queue_id;
1965 	u16 num_queues;
1966 };
1967 
1968 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
1969 
1970 /* structure to specify several chunks of contiguous queues */
1971 struct virtchnl_queue_chunks {
1972 	u16 num_chunks;
1973 	u16 rsvd;
1974 	struct virtchnl_queue_chunk chunks[1];
1975 };
1976 
1977 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks);
1978 
1979 /* VIRTCHNL_OP_ENABLE_QUEUES_V2
1980  * VIRTCHNL_OP_DISABLE_QUEUES_V2
1981  *
1982  * These opcodes can be used if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in
1983  * VIRTCHNL_OP_GET_VF_RESOURCES
1984  *
1985  * VF sends the virtchnl_del_ena_dis_queues struct to specify the queues to be
1986  * enabled/disabled in chunks. Also applicable to single queue RX or
1987  * TX. PF performs requested action and returns status.
1988  */
1989 struct virtchnl_del_ena_dis_queues {
1990 	u16 vport_id;
1991 	u16 pad;
1992 	struct virtchnl_queue_chunks chunks;
1993 };
1994 
1995 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_del_ena_dis_queues);
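
/*
 * Example (editorial sketch): enabling one contiguous range of Tx queues
 * with the V2 opcode. Only a single chunk fits in the base structure;
 * messages with more chunks must be sized the same way as the other
 * variable-length messages above. vf_vsi_id and vf_send_msg() are
 * placeholders.
 *
 *	struct virtchnl_del_ena_dis_queues q = {0};
 *
 *	q.vport_id = vf_vsi_id;
 *	q.chunks.num_chunks = 1;
 *	q.chunks.chunks[0].type = VIRTCHNL_QUEUE_TYPE_TX;
 *	q.chunks.chunks[0].start_queue_id = 0;
 *	q.chunks.chunks[0].num_queues = 4;
 *	vf_send_msg(VIRTCHNL_OP_ENABLE_QUEUES_V2, (u8 *)&q, sizeof(q));
 */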
1996 
1997 /* Virtchannel interrupt throttling rate index */
1998 enum virtchnl_itr_idx {
1999 	VIRTCHNL_ITR_IDX_0	= 0,
2000 	VIRTCHNL_ITR_IDX_1	= 1,
2001 	VIRTCHNL_ITR_IDX_NO_ITR	= 3,
2002 };
2003 
2004 /* Queue to vector mapping */
2005 struct virtchnl_queue_vector {
2006 	u16 queue_id;
2007 	u16 vector_id;
2008 	u8 pad[4];
2009 
2010 	/* see enum virtchnl_itr_idx */
2011 	s32 itr_idx;
2012 
2013 	/* see enum virtchnl_queue_type */
2014 	s32 queue_type;
2015 };
2016 
2017 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
2018 
2019 /* VIRTCHNL_OP_MAP_QUEUE_VECTOR
2020  *
2021  * This opcode can be used only if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated
2022  * in VIRTCHNL_OP_GET_VF_RESOURCES
2023  *
2024  * VF sends this message to map queues to vectors and ITR index registers.
2025  * External data buffer contains virtchnl_queue_vector_maps structure
2026  * that contains num_qv_maps of virtchnl_queue_vector structures.
2027  * PF maps the requested queue vector maps after validating the queue and vector
2028  * ids and returns a status code.
2029  */
2030 struct virtchnl_queue_vector_maps {
2031 	u16 vport_id;
2032 	u16 num_qv_maps;
2033 	u8 pad[4];
2034 	struct virtchnl_queue_vector qv_maps[1];
2035 };
2036 
2037 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
2038 
2039 struct virtchnl_quanta_cfg {
2040 	u16 quanta_size;
2041 	struct virtchnl_queue_chunk queue_select;
2042 };
2043 
2044 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
2045 
2046 /* Since VF messages are limited to a u16 size, precalculate the maximum
2047  * possible number of nested elements in the virtchnl structures that the
2048  * virtual channel can handle in a single message.
2049  */
2050 enum virtchnl_vector_limits {
2051 	VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX	=
2052 		((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
2053 		sizeof(struct virtchnl_queue_pair_info),
2054 
2055 	VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX		=
2056 		((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
2057 		sizeof(struct virtchnl_vector_map),
2058 
2059 	VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX	=
2060 		((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
2061 		sizeof(struct virtchnl_ether_addr),
2062 
2063 	VIRTCHNL_OP_ADD_DEL_VLAN_MAX		=
2064 		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
2065 		sizeof(u16),
2066 
2067 	VIRTCHNL_OP_ENABLE_CHANNELS_MAX		=
2068 		((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
2069 		sizeof(struct virtchnl_channel_info),
2070 
2071 	VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX	=
2072 		((u16)(~0) - sizeof(struct virtchnl_del_ena_dis_queues)) /
2073 		sizeof(struct virtchnl_queue_chunk),
2074 
2075 	VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX	=
2076 		((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) /
2077 		sizeof(struct virtchnl_queue_vector),
2078 
2079 	VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX		=
2080 		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) /
2081 		sizeof(struct virtchnl_vlan_filter),
2082 };
2083 
2084 /**
2085  * virtchnl_vc_validate_vf_msg
2086  * @ver: Virtchnl version info
2087  * @v_opcode: Opcode for the message
2088  * @msg: pointer to the msg buffer
2089  * @msglen: msg length
2090  *
2091  * validate msg format against struct for each opcode
2092  */
2093 static inline int
2094 virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
2095 			    u8 *msg, u16 msglen)
2096 {
2097 	bool err_msg_format = false;
2098 	u32 valid_len = 0;
2099 
2100 	/* Validate message length. */
2101 	switch (v_opcode) {
2102 	case VIRTCHNL_OP_VERSION:
2103 		valid_len = sizeof(struct virtchnl_version_info);
2104 		break;
2105 	case VIRTCHNL_OP_RESET_VF:
2106 		break;
2107 	case VIRTCHNL_OP_GET_VF_RESOURCES:
2108 		if (VF_IS_V11(ver))
2109 			valid_len = sizeof(u32);
2110 		break;
2111 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
2112 		valid_len = sizeof(struct virtchnl_txq_info);
2113 		break;
2114 	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
2115 		valid_len = sizeof(struct virtchnl_rxq_info);
2116 		break;
2117 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2118 		valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
2119 		if (msglen >= valid_len) {
2120 			struct virtchnl_vsi_queue_config_info *vqc =
2121 			    (struct virtchnl_vsi_queue_config_info *)msg;
2122 
2123 			if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
2124 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
2125 				err_msg_format = true;
2126 				break;
2127 			}
2128 
2129 			valid_len += (vqc->num_queue_pairs *
2130 				      sizeof(struct
2131 					     virtchnl_queue_pair_info));
2132 		}
2133 		break;
2134 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2135 		valid_len = sizeof(struct virtchnl_irq_map_info);
2136 		if (msglen >= valid_len) {
2137 			struct virtchnl_irq_map_info *vimi =
2138 			    (struct virtchnl_irq_map_info *)msg;
2139 
2140 			if (vimi->num_vectors == 0 || vimi->num_vectors >
2141 			    VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
2142 				err_msg_format = true;
2143 				break;
2144 			}
2145 
2146 			valid_len += (vimi->num_vectors *
2147 				      sizeof(struct virtchnl_vector_map));
2148 		}
2149 		break;
2150 	case VIRTCHNL_OP_ENABLE_QUEUES:
2151 	case VIRTCHNL_OP_DISABLE_QUEUES:
2152 		valid_len = sizeof(struct virtchnl_queue_select);
2153 		break;
2154 	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
2155 		break;
2156 	case VIRTCHNL_OP_ADD_ETH_ADDR:
2157 	case VIRTCHNL_OP_DEL_ETH_ADDR:
2158 		valid_len = sizeof(struct virtchnl_ether_addr_list);
2159 		if (msglen >= valid_len) {
2160 			struct virtchnl_ether_addr_list *veal =
2161 			    (struct virtchnl_ether_addr_list *)msg;
2162 
2163 			if (veal->num_elements == 0 || veal->num_elements >
2164 			    VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
2165 				err_msg_format = true;
2166 				break;
2167 			}
2168 
2169 			valid_len += veal->num_elements *
2170 			    sizeof(struct virtchnl_ether_addr);
2171 		}
2172 		break;
2173 	case VIRTCHNL_OP_ADD_VLAN:
2174 	case VIRTCHNL_OP_DEL_VLAN:
2175 		valid_len = sizeof(struct virtchnl_vlan_filter_list);
2176 		if (msglen >= valid_len) {
2177 			struct virtchnl_vlan_filter_list *vfl =
2178 			    (struct virtchnl_vlan_filter_list *)msg;
2179 
2180 			if (vfl->num_elements == 0 || vfl->num_elements >
2181 			    VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
2182 				err_msg_format = true;
2183 				break;
2184 			}
2185 
2186 			valid_len += vfl->num_elements * sizeof(u16);
2187 		}
2188 		break;
2189 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
2190 		valid_len = sizeof(struct virtchnl_promisc_info);
2191 		break;
2192 	case VIRTCHNL_OP_GET_STATS:
2193 		valid_len = sizeof(struct virtchnl_queue_select);
2194 		break;
2195 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
2196 		valid_len = sizeof(struct virtchnl_rss_key);
2197 		if (msglen >= valid_len) {
2198 			struct virtchnl_rss_key *vrk =
2199 				(struct virtchnl_rss_key *)msg;
2200 
2201 			if (vrk->key_len == 0) {
2202 				/* zero length is allowed as input */
2203 				break;
2204 			}
2205 
2206 			valid_len += vrk->key_len - 1;
2207 		}
2208 		break;
2209 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
2210 		valid_len = sizeof(struct virtchnl_rss_lut);
2211 		if (msglen >= valid_len) {
2212 			struct virtchnl_rss_lut *vrl =
2213 				(struct virtchnl_rss_lut *)msg;
2214 
2215 			if (vrl->lut_entries == 0) {
2216 				/* zero entries is allowed as input */
2217 				break;
2218 			}
2219 
2220 			valid_len += vrl->lut_entries - 1;
2221 		}
2222 		break;
2223 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
2224 		break;
2225 	case VIRTCHNL_OP_SET_RSS_HENA:
2226 		valid_len = sizeof(struct virtchnl_rss_hena);
2227 		break;
2228 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2229 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2230 		break;
2231 	case VIRTCHNL_OP_REQUEST_QUEUES:
2232 		valid_len = sizeof(struct virtchnl_vf_res_request);
2233 		break;
2234 	case VIRTCHNL_OP_ENABLE_CHANNELS:
2235 		valid_len = sizeof(struct virtchnl_tc_info);
2236 		if (msglen >= valid_len) {
2237 			struct virtchnl_tc_info *vti =
2238 				(struct virtchnl_tc_info *)msg;
2239 
2240 			if (vti->num_tc == 0 || vti->num_tc >
2241 			    VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
2242 				err_msg_format = true;
2243 				break;
2244 			}
2245 
2246 			valid_len += (vti->num_tc - 1) *
2247 				     sizeof(struct virtchnl_channel_info);
2248 		}
2249 		break;
2250 	case VIRTCHNL_OP_DISABLE_CHANNELS:
2251 		break;
2252 	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
2253 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
2254 		valid_len = sizeof(struct virtchnl_filter);
2255 		break;
2256 	case VIRTCHNL_OP_ADD_RSS_CFG:
2257 	case VIRTCHNL_OP_DEL_RSS_CFG:
2258 		valid_len = sizeof(struct virtchnl_rss_cfg);
2259 		break;
2260 	case VIRTCHNL_OP_ADD_FDIR_FILTER:
2261 		valid_len = sizeof(struct virtchnl_fdir_add);
2262 		break;
2263 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
2264 		valid_len = sizeof(struct virtchnl_fdir_del);
2265 		break;
2266 	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
2267 		valid_len = sizeof(struct virtchnl_flow_sub);
2268 		break;
2269 	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
2270 		valid_len = sizeof(struct virtchnl_flow_unsub);
2271 		break;
2272 	case VIRTCHNL_OP_GET_QOS_CAPS:
2273 		break;
2274 	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
2275 		valid_len = sizeof(struct virtchnl_queue_tc_mapping);
2276 		if (msglen >= valid_len) {
2277 			struct virtchnl_queue_tc_mapping *q_tc =
2278 				(struct virtchnl_queue_tc_mapping *)msg;
2279 			if (q_tc->num_tc == 0) {
2280 				err_msg_format = true;
2281 				break;
2282 			}
2283 			valid_len += (q_tc->num_tc - 1) *
2284 					 sizeof(q_tc->tc[0]);
2285 		}
2286 		break;
2287 	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
2288 		valid_len = sizeof(struct virtchnl_queues_bw_cfg);
2289 		if (msglen >= valid_len) {
2290 			struct virtchnl_queues_bw_cfg *q_bw =
2291 				(struct virtchnl_queues_bw_cfg *)msg;
2292 			if (q_bw->num_queues == 0) {
2293 				err_msg_format = true;
2294 				break;
2295 			}
2296 			valid_len += (q_bw->num_queues - 1) *
2297 					 sizeof(q_bw->cfg[0]);
2298 		}
2299 		break;
2300 	case VIRTCHNL_OP_CONFIG_QUANTA:
2301 		valid_len = sizeof(struct virtchnl_quanta_cfg);
2302 		if (msglen >= valid_len) {
2303 			struct virtchnl_quanta_cfg *q_quanta =
2304 				(struct virtchnl_quanta_cfg *)msg;
2305 			if (q_quanta->quanta_size == 0 ||
2306 			    q_quanta->queue_select.num_queues == 0) {
2307 				err_msg_format = true;
2308 				break;
2309 			}
2310 		}
2311 		break;
2312 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
2313 		break;
2314 	case VIRTCHNL_OP_ADD_VLAN_V2:
2315 	case VIRTCHNL_OP_DEL_VLAN_V2:
2316 		valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
2317 		if (msglen >= valid_len) {
2318 			struct virtchnl_vlan_filter_list_v2 *vfl =
2319 			    (struct virtchnl_vlan_filter_list_v2 *)msg;
2320 
2321 			if (vfl->num_elements == 0 || vfl->num_elements >
2322 			    VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) {
2323 				err_msg_format = true;
2324 				break;
2325 			}
2326 
2327 			valid_len += (vfl->num_elements - 1) *
2328 				sizeof(struct virtchnl_vlan_filter);
2329 		}
2330 		break;
2331 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
2332 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
2333 	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
2334 	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
2335 	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
2336 	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
2337 		valid_len = sizeof(struct virtchnl_vlan_setting);
2338 		break;
2339 	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
2340 	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
2341 		valid_len = sizeof(struct virtchnl_del_ena_dis_queues);
2342 		if (msglen >= valid_len) {
2343 			struct virtchnl_del_ena_dis_queues *qs =
2344 				(struct virtchnl_del_ena_dis_queues *)msg;
2345 			if (qs->chunks.num_chunks == 0 ||
2346 			    qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) {
2347 				err_msg_format = true;
2348 				break;
2349 			}
2350 			valid_len += (qs->chunks.num_chunks - 1) *
2351 				      sizeof(struct virtchnl_queue_chunk);
2352 		}
2353 		break;
2354 	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
2355 		valid_len = sizeof(struct virtchnl_queue_vector_maps);
2356 		if (msglen >= valid_len) {
2357 			struct virtchnl_queue_vector_maps *v_qp =
2358 				(struct virtchnl_queue_vector_maps *)msg;
2359 			if (v_qp->num_qv_maps == 0 ||
2360 			    v_qp->num_qv_maps > VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {
2361 				err_msg_format = true;
2362 				break;
2363 			}
2364 			valid_len += (v_qp->num_qv_maps - 1) *
2365 				      sizeof(struct virtchnl_queue_vector);
2366 		}
2367 		break;
2368 	/* These are always errors coming from the VF. */
2369 	case VIRTCHNL_OP_EVENT:
2370 	case VIRTCHNL_OP_UNKNOWN:
2371 	default:
2372 		return VIRTCHNL_STATUS_ERR_PARAM;
2373 	}
2374 	/* few more checks */
2375 	if (err_msg_format || valid_len != msglen)
2376 		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
2377 
2378 	return 0;
2379 }
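
/*
 * Example (editorial sketch): a PF-side message handler would typically run
 * the validator before dispatching on the opcode, so malformed VF messages
 * are rejected with a single status code. ver, v_opcode, msg and msglen are
 * assumed to have been pulled from the received admin queue descriptor.
 *
 *	int err = virtchnl_vc_validate_vf_msg(&ver, v_opcode, msg, msglen);
 *
 *	if (err) {
 *		// reply with the error, e.g. VIRTCHNL_STATUS_ERR_PARAM or
 *		// VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH, and stop processing
 *		return err;
 *	}
 *	// safe to cast msg to the structure matching v_opcode here
 */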
2380 #endif /* _VIRTCHNL_H_ */
2381