1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2013-2022, Intel Corporation. */
3
4 #ifndef _VIRTCHNL_H_
5 #define _VIRTCHNL_H_
6
7 #include <linux/bitops.h>
8 #include <linux/bits.h>
9 #include <linux/overflow.h>
10 #include <uapi/linux/if_ether.h>
11
12 /* Description:
13 * This header file describes the Virtual Function (VF) - Physical Function
14 * (PF) communication protocol used by the drivers for all devices starting
15 * from our 40G product line
16 *
17 * Admin queue buffer usage:
18 * desc->opcode is always aqc_opc_send_msg_to_pf
19 * flags, retval, datalen, and data addr are all used normally.
20 * The Firmware copies the cookie fields when sending messages between the
21 * PF and VF, but uses all other fields internally. Due to this limitation,
22 * we must send all messages as "indirect", i.e. using an external buffer.
23 *
* All the VSI indexes are relative to the VF. Each VF can have a maximum of
25 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
26 * have a maximum of sixteen queues for all of its VSIs.
27 *
28 * The PF is required to return a status code in v_retval for all messages
29 * except RESET_VF, which does not require any response. The returned value
30 * is of virtchnl_status_code type, defined here.
31 *
32 * In general, VF driver initialization should roughly follow the order of
33 * these opcodes. The VF driver must first validate the API version of the
34 * PF driver, then request a reset, then get resources, then configure
35 * queues and interrupts. After these operations are complete, the VF
36 * driver may start its queues, optionally add MAC and VLAN filters, and
37 * process traffic.
38 */
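
/* Illustrative sketch (not normative) of the initialization flow described
 * above; error handling and capability negotiation details are omitted:
 *
 *   VIRTCHNL_OP_VERSION          - negotiate the virtchnl API version
 *   VIRTCHNL_OP_RESET_VF         - request a reset, then poll VFGEN_RSTAT
 *   VIRTCHNL_OP_GET_VF_RESOURCES - learn VSIs, queues, vectors and caps
 *   VIRTCHNL_OP_CONFIG_VSI_QUEUES and VIRTCHNL_OP_CONFIG_IRQ_MAP
 *   VIRTCHNL_OP_ENABLE_QUEUES    - start traffic
 *   VIRTCHNL_OP_ADD_ETH_ADDR / VIRTCHNL_OP_ADD_VLAN - optional filters
 */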
39
40 /* START GENERIC DEFINES
41 * Need to ensure the following enums and defines hold the same meaning and
42 * value in current and future projects
43 */
44
45 /* Error Codes */
46 enum virtchnl_status_code {
47 VIRTCHNL_STATUS_SUCCESS = 0,
48 VIRTCHNL_STATUS_ERR_PARAM = -5,
49 VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
50 VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
51 VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
52 VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
53 VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
54 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
55 };
56
57 /* Backward compatibility */
58 #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
59 #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
60
61 #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
62 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
63 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
64 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
65 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
66 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
67 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
68 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
69
70 enum virtchnl_link_speed {
71 VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
72 VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
73 VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
74 VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
75 VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
76 VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
77 VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
78 VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
79 VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
80 };
81
82 /* for hsplit_0 field of Rx HMC context */
83 /* deprecated with AVF 1.0 */
84 enum virtchnl_rx_hsplit {
85 VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
86 VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
87 VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
88 VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
89 VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
90 };
91
92 /* END GENERIC DEFINES */
93
94 /* Opcodes for VF-PF communication. These are placed in the v_opcode field
95 * of the virtchnl_msg structure.
96 */
97 enum virtchnl_ops {
98 /* The PF sends status change events to VFs using
99 * the VIRTCHNL_OP_EVENT opcode.
100 * VFs send requests to the PF using the other ops.
* Use of "advanced opcode" features must be negotiated as part of the
* capabilities exchange and is not considered part of the base mode feature set.
103 */
104 VIRTCHNL_OP_UNKNOWN = 0,
105 VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
106 VIRTCHNL_OP_RESET_VF = 2,
107 VIRTCHNL_OP_GET_VF_RESOURCES = 3,
108 VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
109 VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
110 VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
111 VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
112 VIRTCHNL_OP_ENABLE_QUEUES = 8,
113 VIRTCHNL_OP_DISABLE_QUEUES = 9,
114 VIRTCHNL_OP_ADD_ETH_ADDR = 10,
115 VIRTCHNL_OP_DEL_ETH_ADDR = 11,
116 VIRTCHNL_OP_ADD_VLAN = 12,
117 VIRTCHNL_OP_DEL_VLAN = 13,
118 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
119 VIRTCHNL_OP_GET_STATS = 15,
120 VIRTCHNL_OP_RSVD = 16,
121 VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
122 VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
123 /* opcode 19 is reserved */
124 VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
125 VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
126 VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
127 VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
128 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
129 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
130 VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
131 VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
132 VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
133 VIRTCHNL_OP_SET_RSS_HENA = 26,
134 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
135 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
136 VIRTCHNL_OP_REQUEST_QUEUES = 29,
137 VIRTCHNL_OP_ENABLE_CHANNELS = 30,
138 VIRTCHNL_OP_DISABLE_CHANNELS = 31,
139 VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
140 VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
141 /* opcode 34 - 43 are reserved */
142 VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
143 VIRTCHNL_OP_ADD_RSS_CFG = 45,
144 VIRTCHNL_OP_DEL_RSS_CFG = 46,
145 VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
146 VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
147 VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
148 VIRTCHNL_OP_ADD_VLAN_V2 = 52,
149 VIRTCHNL_OP_DEL_VLAN_V2 = 53,
150 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
151 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
152 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
153 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
154 VIRTCHNL_OP_MAX,
155 };
156
157 /* These macros are used to generate compilation errors if a structure/union
158 * is not exactly the correct length. It gives a divide by zero error if the
159 * structure/union is not of the correct size, otherwise it creates an enum
160 * that is never used.
161 */
162 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
163 { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
165 { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
166
167 /* Message descriptions and data structures. */
168
169 /* VIRTCHNL_OP_VERSION
170 * VF posts its version number to the PF. PF responds with its version number
171 * in the same format, along with a return code.
172 * Reply from PF has its major/minor versions also in param0 and param1.
173 * If there is a major version mismatch, then the VF cannot operate.
174 * If there is a minor version mismatch, then the VF can operate but should
175 * add a warning to the system log.
176 *
177 * This enum element MUST always be specified as == 1, regardless of other
178 * changes in the API. The PF must always respond to this message without
179 * error regardless of version mismatch.
180 */
181 #define VIRTCHNL_VERSION_MAJOR 1
182 #define VIRTCHNL_VERSION_MINOR 1
183 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
184
185 struct virtchnl_version_info {
186 u32 major;
187 u32 minor;
188 };
189
190 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
191
192 #define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
193 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
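
/* Illustrative sketch: a VF driver might negotiate the API version like this.
 * vf_send_pf_msg() is a hypothetical stand-in for the driver's admin queue
 * transport; "reply" is the PF's virtchnl_version_info response.
 *
 *   struct virtchnl_version_info vvi = {
 *           .major = VIRTCHNL_VERSION_MAJOR,
 *           .minor = VIRTCHNL_VERSION_MINOR,
 *   };
 *
 *   vf_send_pf_msg(VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi));
 *   if (reply.major != VIRTCHNL_VERSION_MAJOR)
 *           abort initialization (major mismatch, VF cannot operate);
 *   else if (reply.minor != VIRTCHNL_VERSION_MINOR)
 *           continue, but log a warning (minor mismatch);
 */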
194
195 /* VIRTCHNL_OP_RESET_VF
196 * VF sends this request to PF with no parameters
197 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
198 * until reset completion is indicated. The admin queue must be reinitialized
199 * after this operation.
200 *
201 * When reset is complete, PF must ensure that all queues in all VSIs associated
202 * with the VF are stopped, all queue configurations in the HMC are set to 0,
203 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
204 * are cleared.
205 */
206
207 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
208 * vsi_type should always be 6 for backward compatibility. Add other fields
209 * as needed.
210 */
211 enum virtchnl_vsi_type {
212 VIRTCHNL_VSI_TYPE_INVALID = 0,
213 VIRTCHNL_VSI_SRIOV = 6,
214 };
215
216 /* VIRTCHNL_OP_GET_VF_RESOURCES
217 * Version 1.0 VF sends this request to PF with no parameters
218 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
219 * PF responds with an indirect message containing
220 * virtchnl_vf_resource and one or more
221 * virtchnl_vsi_resource structures.
222 */
223
224 struct virtchnl_vsi_resource {
225 u16 vsi_id;
226 u16 num_queue_pairs;
227
228 /* see enum virtchnl_vsi_type */
229 s32 vsi_type;
230 u16 qset_handle;
231 u8 default_mac_addr[ETH_ALEN];
232 };
233
234 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
235
236 /* VF capability flags
237 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
238 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
239 */
240 #define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
241 #define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
242 #define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
243 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
244 #define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
245 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
246 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
247 /* used to negotiate communicating link speeds in Mbps */
248 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
249 #define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
250 #define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11)
251 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
252 #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
253 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
254 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
255 #define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
256 #define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
257 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
258 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
259 #define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
260 #define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
261 #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
262 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
263 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
264
265 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
266 VIRTCHNL_VF_OFFLOAD_VLAN | \
267 VIRTCHNL_VF_OFFLOAD_RSS_PF)
268
269 struct virtchnl_vf_resource {
270 u16 num_vsis;
271 u16 num_queue_pairs;
272 u16 max_vectors;
273 u16 max_mtu;
274
275 u32 vf_cap_flags;
276 u32 rss_key_size;
277 u32 rss_lut_size;
278
279 struct virtchnl_vsi_resource vsi_res[];
280 };
281
282 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
283 #define virtchnl_vf_resource_LEGACY_SIZEOF 36
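
/* Illustrative sketch: after VIRTCHNL_OP_GET_VF_RESOURCES the VF driver
 * typically caches the reply and gates optional features on vf_cap_flags.
 * "msg" is assumed to be the PF's indirect reply buffer.
 *
 *   struct virtchnl_vf_resource *res = (struct virtchnl_vf_resource *)msg;
 *
 *   if (res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)
 *           res->rss_key_size and res->rss_lut_size are valid and the
 *           VIRTCHNL_OP_CONFIG_RSS_KEY/LUT opcodes may be used;
 *   the default MAC address of the first VSI is
 *   res->vsi_res[0].default_mac_addr.
 */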
284
285 /* VIRTCHNL_OP_CONFIG_TX_QUEUE
286 * VF sends this message to set up parameters for one TX queue.
287 * External data buffer contains one instance of virtchnl_txq_info.
288 * PF configures requested queue and returns a status code.
289 */
290
291 /* Tx queue config info */
292 struct virtchnl_txq_info {
293 u16 vsi_id;
294 u16 queue_id;
295 u16 ring_len; /* number of descriptors, multiple of 8 */
296 u16 headwb_enabled; /* deprecated with AVF 1.0 */
297 u64 dma_ring_addr;
298 u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
299 };
300
301 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
302
303 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
304 * VF sends this message to set up parameters for one RX queue.
305 * External data buffer contains one instance of virtchnl_rxq_info.
306 * PF configures requested queue and returns a status code. The
307 * crc_disable flag disables CRC stripping on the VF. Setting
308 * the crc_disable flag to 1 will disable CRC stripping for each
309 * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
310 * offload must have been set prior to sending this info or the PF
311 * will ignore the request. This flag should be set the same for
312 * all of the queues for a VF.
313 */
314
315 /* Rx queue config info */
316 struct virtchnl_rxq_info {
317 u16 vsi_id;
318 u16 queue_id;
319 u32 ring_len; /* number of descriptors, multiple of 32 */
320 u16 hdr_size;
321 u16 splithdr_enabled; /* deprecated with AVF 1.0 */
322 u32 databuffer_size;
323 u32 max_pkt_size;
324 u8 crc_disable;
325 u8 rxdid;
326 u8 pad1[2];
327 u64 dma_ring_addr;
328
329 /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
330 s32 rx_split_pos;
331 u32 pad2;
332 };
333
334 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
335
336 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES
337 * VF sends this message to set parameters for all active TX and RX queues
338 * associated with the specified VSI.
339 * PF configures queues and returns status.
340 * If the number of queues specified is greater than the number of queues
341 * associated with the VSI, an error is returned and no queues are configured.
342 * NOTE: The VF is not required to configure all queues in a single request.
343 * It may send multiple messages. PF drivers must correctly handle all VF
344 * requests.
345 */
346 struct virtchnl_queue_pair_info {
347 /* NOTE: vsi_id and queue_id should be identical for both queues. */
348 struct virtchnl_txq_info txq;
349 struct virtchnl_rxq_info rxq;
350 };
351
352 VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
353
354 struct virtchnl_vsi_queue_config_info {
355 u16 vsi_id;
356 u16 num_queue_pairs;
357 u32 pad;
358 struct virtchnl_queue_pair_info qpair[];
359 };
360
361 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
362 #define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72
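
/* Illustrative sketch: sizing and filling a CONFIG_VSI_QUEUES request for
 * num_qp queue pairs, using virtchnl_struct_size() (defined at the end of
 * this header). kzalloc() and the vf_send_pf_msg() helper are assumed to be
 * available in the VF driver; only a subset of the queue fields is shown.
 *
 *   struct virtchnl_vsi_queue_config_info *vqci;
 *   size_t len = virtchnl_struct_size(vqci, qpair, num_qp);
 *
 *   vqci = kzalloc(len, GFP_KERNEL);
 *   vqci->vsi_id = vsi_id;
 *   vqci->num_queue_pairs = num_qp;
 *   for (i = 0; i < num_qp; i++) {
 *           vqci->qpair[i].txq.vsi_id = vsi_id;
 *           vqci->qpair[i].txq.queue_id = i;
 *           vqci->qpair[i].rxq.vsi_id = vsi_id;
 *           vqci->qpair[i].rxq.queue_id = i;
 *           (ring lengths, DMA addresses, buffer sizes, ...)
 *   }
 *   vf_send_pf_msg(VIRTCHNL_OP_CONFIG_VSI_QUEUES, (u8 *)vqci, len);
 */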
363
364 /* VIRTCHNL_OP_REQUEST_QUEUES
365 * VF sends this message to request the PF to allocate additional queues to
366 * this VF. Each VF gets a guaranteed number of queues on init but asking for
367 * additional queues must be negotiated. This is a best effort request as it
368 * is possible the PF does not have enough queues left to support the request.
369 * If the PF cannot support the number requested it will respond with the
370 * maximum number it is able to support. If the request is successful, PF will
371 * then reset the VF to institute required changes.
372 */
373
374 /* VF resource request */
375 struct virtchnl_vf_res_request {
376 u16 num_queue_pairs;
377 };
378
379 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
380 * VF uses this message to map vectors to queues.
381 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
382 * are to be associated with the specified vector.
383 * The "other" causes are always mapped to vector 0. The VF may not request
384 * that vector 0 be used for traffic.
385 * PF configures interrupt mapping and returns status.
386 * NOTE: due to hardware requirements, all active queues (both TX and RX)
387 * should be mapped to interrupts, even if the driver intends to operate
388 * only in polling mode. In this case the interrupt may be disabled, but
389 * the ITR timer will still run to trigger writebacks.
390 */
391 struct virtchnl_vector_map {
392 u16 vsi_id;
393 u16 vector_id;
394 u16 rxq_map;
395 u16 txq_map;
396 u16 rxitr_idx;
397 u16 txitr_idx;
398 };
399
400 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
401
402 struct virtchnl_irq_map_info {
403 u16 num_vectors;
404 struct virtchnl_vector_map vecmap[];
405 };
406
407 VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
408 #define virtchnl_irq_map_info_LEGACY_SIZEOF 14
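
/* Illustrative sketch: mapping RX and TX queue 0 to MSI-X vector 1, leaving
 * vector 0 for "other" causes as required above. kzalloc() and
 * vf_send_pf_msg() are assumed to come from the VF driver.
 *
 *   struct virtchnl_irq_map_info *vimi;
 *   size_t len = virtchnl_struct_size(vimi, vecmap, 1);
 *
 *   vimi = kzalloc(len, GFP_KERNEL);
 *   vimi->num_vectors = 1;
 *   vimi->vecmap[0].vsi_id = vsi_id;
 *   vimi->vecmap[0].vector_id = 1;
 *   vimi->vecmap[0].rxq_map = BIT(0);
 *   vimi->vecmap[0].txq_map = BIT(0);
 *   vf_send_pf_msg(VIRTCHNL_OP_CONFIG_IRQ_MAP, (u8 *)vimi, len);
 */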
409
410 /* VIRTCHNL_OP_ENABLE_QUEUES
411 * VIRTCHNL_OP_DISABLE_QUEUES
* VF sends these messages to enable or disable TX/RX queue pairs.
413 * The queues fields are bitmaps indicating which queues to act upon.
414 * (Currently, we only support 16 queues per VF, but we make the field
415 * u32 to allow for expansion.)
416 * PF performs requested action and returns status.
417 * NOTE: The VF is not required to enable/disable all queues in a single
418 * request. It may send multiple messages.
419 * PF drivers must correctly handle all VF requests.
420 */
421 struct virtchnl_queue_select {
422 u16 vsi_id;
423 u16 pad;
424 u32 rx_queues;
425 u32 tx_queues;
426 };
427
428 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
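
/* Illustrative sketch: enabling the first four TX/RX queue pairs of a VSI.
 * vsi_id and the vf_send_pf_msg() transport helper are assumptions supplied
 * by the VF driver, not part of this header.
 *
 *   struct virtchnl_queue_select vqs = {
 *           .vsi_id = vsi_id,
 *           .rx_queues = GENMASK(3, 0),
 *           .tx_queues = GENMASK(3, 0),
 *   };
 *
 *   vf_send_pf_msg(VIRTCHNL_OP_ENABLE_QUEUES, (u8 *)&vqs, sizeof(vqs));
 */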
429
430 /* VIRTCHNL_OP_ADD_ETH_ADDR
431 * VF sends this message in order to add one or more unicast or multicast
432 * address filters for the specified VSI.
433 * PF adds the filters and returns status.
434 */
435
436 /* VIRTCHNL_OP_DEL_ETH_ADDR
437 * VF sends this message in order to remove one or more unicast or multicast
438 * filters for the specified VSI.
439 * PF removes the filters and returns status.
440 */
441
442 /* VIRTCHNL_ETHER_ADDR_LEGACY
443 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
444 * bytes. Moving forward all VF drivers should not set type to
445 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
446 * behavior. The control plane function (i.e. PF) can use a best effort method
447 * of tracking the primary/device unicast in this case, but there is no
448 * guarantee and functionality depends on the implementation of the PF.
449 */
450
451 /* VIRTCHNL_ETHER_ADDR_PRIMARY
452 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
453 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
454 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
455 * function (i.e. PF) to accurately track and use this MAC address for
456 * displaying on the host and for VM/function reset.
457 */
458
459 /* VIRTCHNL_ETHER_ADDR_EXTRA
460 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
461 * unicast and/or multicast filters that are being added/deleted via
462 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
463 */
464 struct virtchnl_ether_addr {
465 u8 addr[ETH_ALEN];
466 u8 type;
467 #define VIRTCHNL_ETHER_ADDR_LEGACY 0
468 #define VIRTCHNL_ETHER_ADDR_PRIMARY 1
469 #define VIRTCHNL_ETHER_ADDR_EXTRA 2
470 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
471 u8 pad;
472 };
473
474 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
475
476 struct virtchnl_ether_addr_list {
477 u16 vsi_id;
478 u16 num_elements;
479 struct virtchnl_ether_addr list[];
480 };
481
482 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
483 #define virtchnl_ether_addr_list_LEGACY_SIZEOF 12
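
/* Illustrative sketch: adding the VF's primary unicast MAC address with the
 * PRIMARY type so the PF can track it across resets. kzalloc(),
 * ether_addr_copy() and vf_send_pf_msg() are assumed to be available in the
 * VF driver.
 *
 *   struct virtchnl_ether_addr_list *veal;
 *   size_t len = virtchnl_struct_size(veal, list, 1);
 *
 *   veal = kzalloc(len, GFP_KERNEL);
 *   veal->vsi_id = vsi_id;
 *   veal->num_elements = 1;
 *   ether_addr_copy(veal->list[0].addr, netdev->dev_addr);
 *   veal->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
 *   vf_send_pf_msg(VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
 */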
484
485 /* VIRTCHNL_OP_ADD_VLAN
486 * VF sends this message to add one or more VLAN tag filters for receives.
487 * PF adds the filters and returns status.
488 * If a port VLAN is configured by the PF, this operation will return an
489 * error to the VF.
490 */
491
492 /* VIRTCHNL_OP_DEL_VLAN
493 * VF sends this message to remove one or more VLAN tag filters for receives.
494 * PF removes the filters and returns status.
495 * If a port VLAN is configured by the PF, this operation will return an
496 * error to the VF.
497 */
498
499 struct virtchnl_vlan_filter_list {
500 u16 vsi_id;
501 u16 num_elements;
502 u16 vlan_id[];
503 };
504
505 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
506 #define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6
507
508 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
509 * structures and opcodes.
510 *
511 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
512 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
513 *
514 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
515 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
516 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
517 *
518 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
519 * by the PF concurrently. For example, if the PF can support
520 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
521 * would OR the following bits:
522 *
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
524 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
525 * VIRTCHNL_VLAN_ETHERTYPE_AND;
526 *
527 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
528 * and 0x88A8 VLAN ethertypes.
529 *
* VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be supported
531 * by the PF concurrently. For example if the PF can support
532 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
533 * offload it would OR the following bits:
534 *
535 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
536 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
537 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
538 *
539 * The VF would interpret this as VLAN stripping can be supported on either
540 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
541 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
542 * the previously set value.
543 *
544 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
545 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
546 *
547 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
548 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
549 *
* VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
551 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
552 *
553 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
554 * VLAN filtering if the underlying PF supports it.
555 *
* VIRTCHNL_VLAN_TOGGLE - This field indicates whether a certain VLAN
* capability can be toggled. For example, if the underlying PF/CP allows the
* VF to toggle VLAN filtering, stripping, and/or insertion, it should
559 * set this bit along with the supported ethertypes.
560 */
561 enum virtchnl_vlan_support {
562 VIRTCHNL_VLAN_UNSUPPORTED = 0,
563 VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
564 VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
565 VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
566 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
567 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
568 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
569 VIRTCHNL_VLAN_PRIO = BIT(24),
570 VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
571 VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
572 VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
573 VIRTCHNL_VLAN_TOGGLE = BIT(31),
574 };
575
576 /* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
577 * for filtering, insertion, and stripping capabilities.
578 *
579 * If only outer capabilities are supported (for filtering, insertion, and/or
580 * stripping) then this refers to the outer most or single VLAN from the VF's
581 * perspective.
582 *
583 * If only inner capabilities are supported (for filtering, insertion, and/or
584 * stripping) then this refers to the outer most or single VLAN from the VF's
585 * perspective. Functionally this is the same as if only outer capabilities are
586 * supported. The VF driver is just forced to use the inner fields when
587 * adding/deleting filters and enabling/disabling offloads (if supported).
588 *
589 * If both outer and inner capabilities are supported (for filtering, insertion,
590 * and/or stripping) then outer refers to the outer most or single VLAN and
591 * inner refers to the second VLAN, if it exists, in the packet.
592 *
593 * There is no support for tunneled VLAN offloads, so outer or inner are never
594 * referring to a tunneled packet from the VF's perspective.
595 */
596 struct virtchnl_vlan_supported_caps {
597 u32 outer;
598 u32 inner;
599 };
600
601 /* The PF populates these fields based on the supported VLAN filtering. If a
602 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
603 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
604 * the unsupported fields.
605 *
606 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
607 * VIRTCHNL_VLAN_TOGGLE bit is set.
608 *
609 * The ethertype(s) specified in the ethertype_init field are the ethertypes
610 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
611 * most VLAN from the VF's perspective. If both inner and outer filtering are
* allowed then ethertype_init only refers to the outer most VLAN, as the only
* VLAN ethertype supported for inner VLAN filtering is
614 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
615 * when both inner and outer filtering are allowed.
616 *
617 * The max_filters field tells the VF how many VLAN filters it's allowed to have
618 * at any one time. If it exceeds this amount and tries to add another filter,
619 * then the request will be rejected by the PF. To prevent failures, the VF
620 * should keep track of how many VLAN filters it has added and not attempt to
621 * add more than max_filters.
622 */
623 struct virtchnl_vlan_filtering_caps {
624 struct virtchnl_vlan_supported_caps filtering_support;
625 u32 ethertype_init;
626 u16 max_filters;
627 u8 pad[2];
628 };
629
630 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
631
632 /* This enum is used for the virtchnl_vlan_offload_caps structure to specify
633 * if the PF supports a different ethertype for stripping and insertion.
634 *
635 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
* for stripping affect the ethertype(s) specified for insertion and vice versa
637 * as well. If the VF tries to configure VLAN stripping via
638 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
639 * that will be the ethertype for both stripping and insertion.
640 *
641 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
* stripping do not affect the ethertype(s) specified for insertion and vice
* versa.
644 */
645 enum virtchnl_vlan_ethertype_match {
646 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
647 VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
648 };
649
650 /* The PF populates these fields based on the supported VLAN offloads. If a
651 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
652 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
653 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
654 *
655 * Also, a VF is only allowed to toggle its VLAN offload setting if the
* VIRTCHNL_VLAN_TOGGLE bit is set.
657 *
658 * The VF driver needs to be aware of how the tags are stripped by hardware and
659 * inserted by the VF driver based on the level of offload support. The PF will
660 * populate these fields based on where the VLAN tags are expected to be
* offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
662 * interpret these fields. See the definition of the
663 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
664 * enumeration.
665 */
666 struct virtchnl_vlan_offload_caps {
667 struct virtchnl_vlan_supported_caps stripping_support;
668 struct virtchnl_vlan_supported_caps insertion_support;
669 u32 ethertype_init;
670 u8 ethertype_match;
671 u8 pad[3];
672 };
673
674 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
675
676 /* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
677 * VF sends this message to determine its VLAN capabilities.
678 *
679 * PF will mark which capabilities it supports based on hardware support and
680 * current configuration. For example, if a port VLAN is configured the PF will
681 * not allow outer VLAN filtering, stripping, or insertion to be configured so
682 * it will block these features from the VF.
683 *
* The VF will need to cross reference its capabilities with the PF's
685 * capabilities in the response message from the PF to determine the VLAN
686 * support.
687 */
688 struct virtchnl_vlan_caps {
689 struct virtchnl_vlan_filtering_caps filtering;
690 struct virtchnl_vlan_offload_caps offloads;
691 };
692
693 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
694
695 struct virtchnl_vlan {
696 u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
697 u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
698 * filtering caps
699 */
700 u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
701 * filtering caps. Note that tpid here does not refer to
702 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
703 * actual 2-byte VLAN TPID
704 */
705 u8 pad[2];
706 };
707
708 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
709
710 struct virtchnl_vlan_filter {
711 struct virtchnl_vlan inner;
712 struct virtchnl_vlan outer;
713 u8 pad[16];
714 };
715
716 VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
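
/* Illustrative sketch: encoding VLAN ID 100 with priority 3 and TPID 0x8100
 * into the inner filter, assuming only inner filtering support was
 * negotiated (tci[15:13] = PCP, tci[11:0] = VID).
 *
 *   struct virtchnl_vlan_filter flt = {};
 *
 *   flt.inner.tci = (3 << 13) | 100;
 *   flt.inner.tpid = ETH_P_8021Q;
 */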
717
718 /* VIRTCHNL_OP_ADD_VLAN_V2
719 * VIRTCHNL_OP_DEL_VLAN_V2
720 *
721 * VF sends these messages to add/del one or more VLAN tag filters for Rx
722 * traffic.
723 *
724 * The PF attempts to add the filters and returns status.
725 *
726 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
727 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
728 */
729 struct virtchnl_vlan_filter_list_v2 {
730 u16 vport_id;
731 u16 num_elements;
732 u8 pad[4];
733 struct virtchnl_vlan_filter filters[];
734 };
735
736 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
737 #define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
738
739 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
740 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
741 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
742 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
743 *
744 * VF sends this message to enable or disable VLAN stripping or insertion. It
745 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
746 * allowed and whether or not it's allowed to enable/disable the specific
747 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
748 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
749 * messages are allowed.
750 *
751 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
752 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
753 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
754 * case means the outer most or single VLAN from the VF's perspective. This is
755 * because no outer offloads are supported. See the comments above the
756 * virtchnl_vlan_supported_caps structure for more details.
757 *
758 * virtchnl_vlan_caps.offloads.stripping_support.inner =
759 * VIRTCHNL_VLAN_TOGGLE |
760 * VIRTCHNL_VLAN_ETHERTYPE_8100;
761 *
762 * virtchnl_vlan_caps.offloads.insertion_support.inner =
763 * VIRTCHNL_VLAN_TOGGLE |
764 * VIRTCHNL_VLAN_ETHERTYPE_8100;
765 *
766 * In order to enable inner (again note that in this case inner is the outer
767 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
768 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
769 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
770 *
771 * virtchnl_vlan_setting.inner_ethertype_setting =
772 * VIRTCHNL_VLAN_ETHERTYPE_8100;
773 *
774 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
775 * initialization.
776 *
777 * The reason that VLAN TPID(s) are not being used for the
778 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
779 * possible a device could support VLAN insertion and/or stripping offload on
780 * multiple ethertypes concurrently, so this method allows a VF to request
781 * multiple ethertypes in one message using the virtchnl_vlan_support
782 * enumeration.
783 *
784 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
785 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
786 * VLAN insertion and stripping simultaneously. The
787 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
788 * populated based on what the PF can support.
789 *
790 * virtchnl_vlan_caps.offloads.stripping_support.outer =
791 * VIRTCHNL_VLAN_TOGGLE |
792 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
793 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
794 * VIRTCHNL_VLAN_ETHERTYPE_AND;
795 *
796 * virtchnl_vlan_caps.offloads.insertion_support.outer =
797 * VIRTCHNL_VLAN_TOGGLE |
798 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
799 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
800 * VIRTCHNL_VLAN_ETHERTYPE_AND;
801 *
802 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
* would populate the virtchnl_vlan_setting structure in the following manner
804 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
805 *
806 * virtchnl_vlan_setting.outer_ethertype_setting =
* VIRTCHNL_VLAN_ETHERTYPE_8100 |
* VIRTCHNL_VLAN_ETHERTYPE_88A8;
809 *
810 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
811 * initialization.
812 *
813 * There is also the case where a PF and the underlying hardware can support
814 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
815 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
816 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
817 * offloads. The ethertypes must match for stripping and insertion.
818 *
819 * virtchnl_vlan_caps.offloads.stripping_support.outer =
820 * VIRTCHNL_VLAN_TOGGLE |
821 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
822 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
823 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
824 *
825 * virtchnl_vlan_caps.offloads.insertion_support.outer =
826 * VIRTCHNL_VLAN_TOGGLE |
827 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
828 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
829 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
830 *
831 * virtchnl_vlan_caps.offloads.ethertype_match =
832 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
833 *
834 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
835 * populate the virtchnl_vlan_setting structure in the following manner and send
836 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
837 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
838 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
839 *
* virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
841 *
842 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
843 * initialization.
844 */
845 struct virtchnl_vlan_setting {
846 u32 outer_ethertype_setting;
847 u32 inner_ethertype_setting;
848 u16 vport_id;
849 u8 pad[6];
850 };
851
852 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
853
854 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
855 * VF sends VSI id and flags.
856 * PF returns status code in retval.
857 * Note: we assume that broadcast accept mode is always enabled.
858 */
859 struct virtchnl_promisc_info {
860 u16 vsi_id;
861 u16 flags;
862 };
863
864 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
865
866 #define FLAG_VF_UNICAST_PROMISC 0x00000001
867 #define FLAG_VF_MULTICAST_PROMISC 0x00000002
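
/* Illustrative sketch: requesting unicast and multicast promiscuous mode.
 * vsi_id and vf_send_pf_msg() are assumptions supplied by the VF driver.
 *
 *   struct virtchnl_promisc_info vpi = {
 *           .vsi_id = vsi_id,
 *           .flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC,
 *   };
 *
 *   vf_send_pf_msg(VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, (u8 *)&vpi,
 *                  sizeof(vpi));
 */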
868
869 /* VIRTCHNL_OP_GET_STATS
870 * VF sends this message to request stats for the selected VSI. VF uses
871 * the virtchnl_queue_select struct to specify the VSI. The queue_id
872 * field is ignored by the PF.
873 *
874 * PF replies with struct eth_stats in an external buffer.
875 */
876
877 /* VIRTCHNL_OP_CONFIG_RSS_KEY
878 * VIRTCHNL_OP_CONFIG_RSS_LUT
879 * VF sends these messages to configure RSS. Only supported if both PF
880 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
881 * configuration negotiation. If this is the case, then the RSS fields in
882 * the VF resource struct are valid.
883 * Both the key and LUT are initialized to 0 by the PF, meaning that
884 * RSS is effectively disabled until set up by the VF.
885 */
886 struct virtchnl_rss_key {
887 u16 vsi_id;
888 u16 key_len;
889 u8 key[]; /* RSS hash key, packed bytes */
890 };
891
892 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
893 #define virtchnl_rss_key_LEGACY_SIZEOF 6
894
895 struct virtchnl_rss_lut {
896 u16 vsi_id;
897 u16 lut_entries;
898 u8 lut[]; /* RSS lookup table */
899 };
900
901 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
902 #define virtchnl_rss_lut_LEGACY_SIZEOF 6
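
/* Illustrative sketch: sizing a CONFIG_RSS_KEY message using the key length
 * reported in virtchnl_vf_resource::rss_key_size. kzalloc(), memcpy(),
 * my_rss_key and vf_send_pf_msg() are assumptions supplied by the VF driver.
 *
 *   struct virtchnl_rss_key *vrk;
 *   size_t len = virtchnl_struct_size(vrk, key, res->rss_key_size);
 *
 *   vrk = kzalloc(len, GFP_KERNEL);
 *   vrk->vsi_id = vsi_id;
 *   vrk->key_len = res->rss_key_size;
 *   memcpy(vrk->key, my_rss_key, vrk->key_len);
 *   vf_send_pf_msg(VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
 */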
903
904 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS
905 * VIRTCHNL_OP_SET_RSS_HENA
906 * VF sends these messages to get and set the hash filter enable bits for RSS.
907 * By default, the PF sets these to all possible traffic types that the
908 * hardware supports. The VF can query this value if it wants to change the
909 * traffic types that are hashed by the hardware.
910 */
911 struct virtchnl_rss_hena {
912 u64 hena;
913 };
914
915 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
916
917 /* Type of RSS algorithm */
918 enum virtchnl_rss_algorithm {
919 VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
920 VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
921 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
922 VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
923 };
924
925 /* VIRTCHNL_OP_CONFIG_RSS_HFUNC
926 * VF sends this message to configure the RSS hash function. Only supported
927 * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
928 * configuration negotiation.
929 * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
930 * by the PF.
931 */
932 struct virtchnl_rss_hfunc {
933 u16 vsi_id;
934 u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
935 u32 reserved;
936 };
937
938 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
939
940 /* VIRTCHNL_OP_ENABLE_CHANNELS
941 * VIRTCHNL_OP_DISABLE_CHANNELS
942 * VF sends these messages to enable or disable channels based on
943 * the user specified queue count and queue offset for each traffic class.
944 * This struct encompasses all the information that the PF needs from
945 * VF to create a channel.
946 */
947 struct virtchnl_channel_info {
948 u16 count; /* number of queues in a channel */
949 u16 offset; /* queues in a channel start from 'offset' */
950 u32 pad;
951 u64 max_tx_rate;
952 };
953
954 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
955
956 struct virtchnl_tc_info {
957 u32 num_tc;
958 u32 pad;
959 struct virtchnl_channel_info list[];
960 };
961
962 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
963 #define virtchnl_tc_info_LEGACY_SIZEOF 24
964
965 /* VIRTCHNL_ADD_CLOUD_FILTER
966 * VIRTCHNL_DEL_CLOUD_FILTER
967 * VF sends these messages to add or delete a cloud filter based on the
968 * user specified match and action filters. These structures encompass
969 * all the information that the PF needs from the VF to add/delete a
970 * cloud filter.
971 */
972
973 struct virtchnl_l4_spec {
974 u8 src_mac[ETH_ALEN];
975 u8 dst_mac[ETH_ALEN];
976 __be16 vlan_id;
977 __be16 pad; /* reserved for future use */
978 __be32 src_ip[4];
979 __be32 dst_ip[4];
980 __be16 src_port;
981 __be16 dst_port;
982 };
983
984 VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
985
986 union virtchnl_flow_spec {
987 struct virtchnl_l4_spec tcp_spec;
988 u8 buffer[128]; /* reserved for future use */
989 };
990
991 VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
992
993 enum virtchnl_action {
994 /* action types */
995 VIRTCHNL_ACTION_DROP = 0,
996 VIRTCHNL_ACTION_TC_REDIRECT,
997 VIRTCHNL_ACTION_PASSTHRU,
998 VIRTCHNL_ACTION_QUEUE,
999 VIRTCHNL_ACTION_Q_REGION,
1000 VIRTCHNL_ACTION_MARK,
1001 VIRTCHNL_ACTION_COUNT,
1002 };
1003
1004 enum virtchnl_flow_type {
1005 /* flow types */
1006 VIRTCHNL_TCP_V4_FLOW = 0,
1007 VIRTCHNL_TCP_V6_FLOW,
1008 };
1009
1010 struct virtchnl_filter {
1011 union virtchnl_flow_spec data;
1012 union virtchnl_flow_spec mask;
1013
1014 /* see enum virtchnl_flow_type */
1015 s32 flow_type;
1016
1017 /* see enum virtchnl_action */
1018 s32 action;
1019 u32 action_meta;
1020 u8 field_flags;
1021 u8 pad[3];
1022 };
1023
1024 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
1025
1026 struct virtchnl_supported_rxdids {
1027 u64 supported_rxdids;
1028 };
1029
1030 /* VIRTCHNL_OP_EVENT
1031 * PF sends this message to inform the VF driver of events that may affect it.
1032 * No direct response is expected from the VF, though it may generate other
1033 * messages in response to this one.
1034 */
1035 enum virtchnl_event_codes {
1036 VIRTCHNL_EVENT_UNKNOWN = 0,
1037 VIRTCHNL_EVENT_LINK_CHANGE,
1038 VIRTCHNL_EVENT_RESET_IMPENDING,
1039 VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
1040 };
1041
1042 #define PF_EVENT_SEVERITY_INFO 0
1043 #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
1044
1045 struct virtchnl_pf_event {
1046 /* see enum virtchnl_event_codes */
1047 s32 event;
1048 union {
1049 /* If the PF driver does not support the new speed reporting
1050 * capabilities then use link_event else use link_event_adv to
1051 * get the speed and link information. The ability to understand
1052 * new speeds is indicated by setting the capability flag
1053 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
1054 * in virtchnl_vf_resource struct and can be used to determine
1055 * which link event struct to use below.
1056 */
1057 struct {
1058 enum virtchnl_link_speed link_speed;
1059 bool link_status;
1060 u8 pad[3];
1061 } link_event;
1062 struct {
1063 /* link_speed provided in Mbps */
1064 u32 link_speed;
1065 u8 link_status;
1066 u8 pad[3];
1067 } link_event_adv;
1068 } event_data;
1069
1070 s32 severity;
1071 };
1072
1073 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
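
/* Illustrative sketch: decoding a link change event depending on whether
 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED was negotiated. "msg" and "vf_cap_flags"
 * are assumed to come from the VF driver's receive path.
 *
 *   struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg;
 *
 *   if (vpe->event == VIRTCHNL_EVENT_LINK_CHANGE) {
 *           if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
 *                   use vpe->event_data.link_event_adv (speed in Mbps);
 *           else
 *                   use vpe->event_data.link_event (enum virtchnl_link_speed);
 *   }
 */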
1074
1075 /* used to specify if a ceq_idx or aeq_idx is invalid */
1076 #define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
1077 /* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
1078 * VF uses this message to request PF to map RDMA vectors to RDMA queues.
1079 * The request for this originates from the VF RDMA driver through
1080 * a client interface between VF LAN and VF RDMA driver.
* A vector can have both an AEQ and a CEQ attached to it, although there is
* only a single AEQ per VF RDMA instance; in that case most vectors will have
* VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq_idx and a valid ceq_idx. There will
* never be multiple CEQs attached to a single vector.
1086 * PF configures interrupt mapping and returns status.
1087 */
1088
1089 struct virtchnl_rdma_qv_info {
1090 u32 v_idx; /* msix_vector */
1091 u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1092 u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1093 u8 itr_idx;
1094 u8 pad[3];
1095 };
1096
1097 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
1098
1099 struct virtchnl_rdma_qvlist_info {
1100 u32 num_vectors;
1101 struct virtchnl_rdma_qv_info qv_info[];
1102 };
1103
1104 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
1105 #define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
1106
1107 /* VF reset states - these are written into the RSTAT register:
1108 * VFGEN_RSTAT on the VF
1109 * When the PF initiates a reset, it writes 0
1110 * When the reset is complete, it writes 1
1111 * When the PF detects that the VF has recovered, it writes 2
1112 * VF checks this register periodically to determine if a reset has occurred,
1113 * then polls it to know when the reset is complete.
1114 * If either the PF or VF reads the register while the hardware
* is in a reset state, it will return DEADBEEF, which, when masked,
* will result in 3.
1117 */
1118 enum virtchnl_vfr_states {
1119 VIRTCHNL_VFR_INPROGRESS = 0,
1120 VIRTCHNL_VFR_COMPLETED,
1121 VIRTCHNL_VFR_VFACTIVE,
1122 };
1123
1124 #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
1125 #define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
1126 #define PROTO_HDR_SHIFT 5
1127 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
1128 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
1129
/* The VF uses these macros to configure each protocol header, specifying
* which protocol headers and protocol header fields to match based on
* virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
* @param hdr: a pointer to a struct virtchnl_proto_hdr
1134 * @param hdr_type: ETH/IPV4/TCP, etc
1135 * @param field: SRC/DST/TEID/SPI, etc
1136 */
1137 #define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
1138 ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
1139 #define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
1140 ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
1141 #define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
1142 ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
1143 #define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
1144
1145 #define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1146 (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
1147 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1148 #define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1149 (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
1150 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1151
1152 #define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
1153 ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
1154 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
1155 (((hdr)->type) >> PROTO_HDR_SHIFT)
1156 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
1157 ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
1158 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
1159 (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
1160 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
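
/* Illustrative sketch: describing an IPv4 header and selecting its source
 * and destination address fields with the helper macros above. The
 * virtchnl_proto_hdr structure and proto_hdrs container are defined further
 * below.
 *
 *   struct virtchnl_proto_hdr *hdr = &proto_hdrs.proto_hdr[0];
 *
 *   VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
 *   VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
 *   VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 */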
1161
/* Protocol header type within a packet segment. A segment is a logical group
* of one or more protocol headers; each group encapsulates, or is encapsulated
* by, tunneling or encapsulation protocols used for network virtualization.
1166 */
1167 enum virtchnl_proto_hdr_type {
1168 VIRTCHNL_PROTO_HDR_NONE,
1169 VIRTCHNL_PROTO_HDR_ETH,
1170 VIRTCHNL_PROTO_HDR_S_VLAN,
1171 VIRTCHNL_PROTO_HDR_C_VLAN,
1172 VIRTCHNL_PROTO_HDR_IPV4,
1173 VIRTCHNL_PROTO_HDR_IPV6,
1174 VIRTCHNL_PROTO_HDR_TCP,
1175 VIRTCHNL_PROTO_HDR_UDP,
1176 VIRTCHNL_PROTO_HDR_SCTP,
1177 VIRTCHNL_PROTO_HDR_GTPU_IP,
1178 VIRTCHNL_PROTO_HDR_GTPU_EH,
1179 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
1180 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
1181 VIRTCHNL_PROTO_HDR_PPPOE,
1182 VIRTCHNL_PROTO_HDR_L2TPV3,
1183 VIRTCHNL_PROTO_HDR_ESP,
1184 VIRTCHNL_PROTO_HDR_AH,
1185 VIRTCHNL_PROTO_HDR_PFCP,
1186 };
1187
1188 /* Protocol header field within a protocol header. */
1189 enum virtchnl_proto_hdr_field {
1190 /* ETHER */
1191 VIRTCHNL_PROTO_HDR_ETH_SRC =
1192 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
1193 VIRTCHNL_PROTO_HDR_ETH_DST,
1194 VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
1195 /* S-VLAN */
1196 VIRTCHNL_PROTO_HDR_S_VLAN_ID =
1197 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
1198 /* C-VLAN */
1199 VIRTCHNL_PROTO_HDR_C_VLAN_ID =
1200 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
1201 /* IPV4 */
1202 VIRTCHNL_PROTO_HDR_IPV4_SRC =
1203 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
1204 VIRTCHNL_PROTO_HDR_IPV4_DST,
1205 VIRTCHNL_PROTO_HDR_IPV4_DSCP,
1206 VIRTCHNL_PROTO_HDR_IPV4_TTL,
1207 VIRTCHNL_PROTO_HDR_IPV4_PROT,
1208 /* IPV6 */
1209 VIRTCHNL_PROTO_HDR_IPV6_SRC =
1210 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
1211 VIRTCHNL_PROTO_HDR_IPV6_DST,
1212 VIRTCHNL_PROTO_HDR_IPV6_TC,
1213 VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
1214 VIRTCHNL_PROTO_HDR_IPV6_PROT,
1215 /* TCP */
1216 VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
1217 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
1218 VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
1219 /* UDP */
1220 VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
1221 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
1222 VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
1223 /* SCTP */
1224 VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
1225 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
1226 VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
1227 /* GTPU_IP */
1228 VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
1229 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
1230 /* GTPU_EH */
1231 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
1232 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
1233 VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
1234 /* PPPOE */
1235 VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
1236 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
1237 /* L2TPV3 */
1238 VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
1239 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
1240 /* ESP */
1241 VIRTCHNL_PROTO_HDR_ESP_SPI =
1242 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
1243 /* AH */
1244 VIRTCHNL_PROTO_HDR_AH_SPI =
1245 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
1246 /* PFCP */
1247 VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
1248 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
1249 VIRTCHNL_PROTO_HDR_PFCP_SEID,
1250 };
1251
1252 struct virtchnl_proto_hdr {
1253 /* see enum virtchnl_proto_hdr_type */
1254 s32 type;
1255 u32 field_selector; /* a bit mask to select field for header type */
1256 u8 buffer[64];
1257 /**
* binary buffer in network order for the specific header type.
* For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
* header is expected to be copied into the buffer.
1261 */
1262 };
1263
1264 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
1265
1266 struct virtchnl_proto_hdrs {
1267 u8 tunnel_level;
1268 u8 pad[3];
1269 /**
* specify where protocol headers start from.
1271 * must be 0 when sending a raw packet request.
1272 * 0 - from the outer layer
1273 * 1 - from the first inner layer
1274 * 2 - from the second inner layer
1275 * ....
1276 **/
int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
1278 union {
1279 struct virtchnl_proto_hdr
1280 proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
1281 struct {
1282 u16 pkt_len;
1283 u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1284 u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1285 } raw;
1286 };
1287 };
1288
1289 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1290
1291 struct virtchnl_rss_cfg {
1292 struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
1293
1294 /* see enum virtchnl_rss_algorithm; rss algorithm type */
1295 s32 rss_algorithm;
1296 u8 reserved[128]; /* reserve for future */
1297 };
1298
1299 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
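
/* Illustrative sketch: an ADD_RSS_CFG request that hashes TCP/IPv4 flows on
 * the 4-tuple using symmetric Toeplitz. vf_send_pf_msg() is a hypothetical
 * transport helper.
 *
 *   struct virtchnl_rss_cfg cfg = {};
 *   struct virtchnl_proto_hdrs *ph = &cfg.proto_hdrs;
 *
 *   ph->tunnel_level = 0;
 *   ph->count = 2;
 *   VIRTCHNL_SET_PROTO_HDR_TYPE(&ph->proto_hdr[0], IPV4);
 *   VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&ph->proto_hdr[0], IPV4, SRC);
 *   VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&ph->proto_hdr[0], IPV4, DST);
 *   VIRTCHNL_SET_PROTO_HDR_TYPE(&ph->proto_hdr[1], TCP);
 *   VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&ph->proto_hdr[1], TCP, SRC_PORT);
 *   VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&ph->proto_hdr[1], TCP, DST_PORT);
 *   cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
 *   vf_send_pf_msg(VIRTCHNL_OP_ADD_RSS_CFG, (u8 *)&cfg, sizeof(cfg));
 */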
1300
1301 /* action configuration for FDIR */
1302 struct virtchnl_filter_action {
1303 /* see enum virtchnl_action type */
1304 s32 type;
1305 union {
1306 /* used for queue and qgroup action */
1307 struct {
1308 u16 index;
1309 u8 region;
1310 } queue;
1311 /* used for count action */
1312 struct {
1313 /* share counter ID with other flow rules */
1314 u8 shared;
1315 u32 id; /* counter ID */
1316 } count;
1317 /* used for mark action */
1318 u32 mark_id;
1319 u8 reserve[32];
1320 } act_conf;
1321 };
1322
1323 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1324
1325 #define VIRTCHNL_MAX_NUM_ACTIONS 8
1326
1327 struct virtchnl_filter_action_set {
/* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1329 int count;
1330 struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1331 };
1332
1333 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1334
1335 /* pattern and action for FDIR rule */
1336 struct virtchnl_fdir_rule {
1337 struct virtchnl_proto_hdrs proto_hdrs;
1338 struct virtchnl_filter_action_set action_set;
1339 };
1340
1341 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1342
/* Status returned to VF after VF requests FDIR commands
* VIRTCHNL_FDIR_SUCCESS
* The VF's FDIR-related request was completed successfully by the PF.
* The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
*
* VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
* OP_ADD_FDIR_FILTER request failed because no hardware resource is available.
*
* VIRTCHNL_FDIR_FAILURE_RULE_EXIST
* OP_ADD_FDIR_FILTER request failed because the rule already exists.
*
* VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
* OP_ADD_FDIR_FILTER request failed because it conflicts with an existing rule.
*
* VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
* OP_DEL_FDIR_FILTER request failed because the rule does not exist.
*
* VIRTCHNL_FDIR_FAILURE_RULE_INVALID
* OP_ADD_FDIR_FILTER request failed because parameter validation failed or
* the hardware does not support the rule.
*
* VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
* OP_ADD/DEL_FDIR_FILTER request failed because rule programming timed out.
*
* VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
* OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
* for example, the VF queried the counter of a rule that has no counter
* action.
*/
1372 enum virtchnl_fdir_prgm_status {
1373 VIRTCHNL_FDIR_SUCCESS = 0,
1374 VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1375 VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1376 VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1377 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1378 VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1379 VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1380 VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1381 };
1382
1383 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1384 * VF sends this request to PF by filling out vsi_id,
1385 * validate_only and rule_cfg. PF will return flow_id
1386 * if the request is successfully done and return add_status to VF.
1387 */
1388 struct virtchnl_fdir_add {
1389 u16 vsi_id; /* INPUT */
1390 /*
* 1 for validating an FDIR rule, 0 for creating an FDIR rule.
* Validation and creation share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
1393 */
1394 u16 validate_only; /* INPUT */
1395 u32 flow_id; /* OUTPUT */
1396 struct virtchnl_fdir_rule rule_cfg; /* INPUT */
1397
1398 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1399 s32 status;
1400 };
1401
1402 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
1403
1404 /* VIRTCHNL_OP_DEL_FDIR_FILTER
1405 * VF sends this request to PF by filling out vsi_id
1406 * and flow_id. PF will return del_status to VF.
1407 */
1408 struct virtchnl_fdir_del {
1409 u16 vsi_id; /* INPUT */
1410 u16 pad;
1411 u32 flow_id; /* INPUT */
1412
1413 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1414 s32 status;
1415 };
1416
1417 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
1418
1419 #define __vss_byone(p, member, count, old) \
1420 (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
1421
1422 #define __vss_byelem(p, member, count, old) \
1423 (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))
1424
1425 #define __vss_full(p, member, count, old) \
1426 (struct_size(p, member, count) + (old - struct_size(p, member, 0)))
1427
1428 #define __vss(type, func, p, member, count) \
1429 struct type: func(p, member, count, type##_LEGACY_SIZEOF)
1430
1431 #define virtchnl_struct_size(p, m, c) \
1432 _Generic(*p, \
1433 __vss(virtchnl_vf_resource, __vss_full, p, m, c), \
1434 __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \
1435 __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \
1436 __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \
1437 __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \
1438 __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
1439 __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
1440 __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
1441 __vss(virtchnl_rss_key, __vss_byone, p, m, c), \
1442 __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
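
/* Illustrative sketch: virtchnl_struct_size() computes the wire size of a
 * variable-length message; the result intentionally matches what legacy
 * (pre flexible-array) drivers sent for the same element count. kzalloc()
 * is assumed to be available in the VF driver.
 *
 *   struct virtchnl_vlan_filter_list *vfl;
 *   size_t len = virtchnl_struct_size(vfl, vlan_id, count);
 *
 *   vfl = kzalloc(len, GFP_KERNEL);
 *   vfl->num_elements = count;
 *   fill vfl->vlan_id[0..count-1] and send len bytes;
 */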
1443
1444 /**
1445 * virtchnl_vc_validate_vf_msg
1446 * @ver: Virtchnl version info
1447 * @v_opcode: Opcode for the message
1448 * @msg: pointer to the msg buffer
1449 * @msglen: msg length
1450 *
1451 * validate msg format against struct for each opcode
1452 */
1453 static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
			    u8 *msg, u16 msglen)
1456 {
1457 bool err_msg_format = false;
1458 u32 valid_len = 0;
1459
1460 /* Validate message length. */
1461 switch (v_opcode) {
1462 case VIRTCHNL_OP_VERSION:
1463 valid_len = sizeof(struct virtchnl_version_info);
1464 break;
1465 case VIRTCHNL_OP_RESET_VF:
1466 break;
1467 case VIRTCHNL_OP_GET_VF_RESOURCES:
1468 if (VF_IS_V11(ver))
1469 valid_len = sizeof(u32);
1470 break;
1471 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1472 valid_len = sizeof(struct virtchnl_txq_info);
1473 break;
1474 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1475 valid_len = sizeof(struct virtchnl_rxq_info);
1476 break;
1477 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1478 valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
1479 if (msglen >= valid_len) {
1480 struct virtchnl_vsi_queue_config_info *vqc =
1481 (struct virtchnl_vsi_queue_config_info *)msg;
1482 valid_len = virtchnl_struct_size(vqc, qpair,
1483 vqc->num_queue_pairs);
1484 if (vqc->num_queue_pairs == 0)
1485 err_msg_format = true;
1486 }
1487 break;
1488 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1489 valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
1490 if (msglen >= valid_len) {
1491 struct virtchnl_irq_map_info *vimi =
1492 (struct virtchnl_irq_map_info *)msg;
1493 valid_len = virtchnl_struct_size(vimi, vecmap,
1494 vimi->num_vectors);
1495 if (vimi->num_vectors == 0)
1496 err_msg_format = true;
1497 }
1498 break;
1499 case VIRTCHNL_OP_ENABLE_QUEUES:
1500 case VIRTCHNL_OP_DISABLE_QUEUES:
1501 valid_len = sizeof(struct virtchnl_queue_select);
1502 break;
1503 case VIRTCHNL_OP_ADD_ETH_ADDR:
1504 case VIRTCHNL_OP_DEL_ETH_ADDR:
1505 valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
1506 if (msglen >= valid_len) {
1507 struct virtchnl_ether_addr_list *veal =
1508 (struct virtchnl_ether_addr_list *)msg;
1509 valid_len = virtchnl_struct_size(veal, list,
1510 veal->num_elements);
1511 if (veal->num_elements == 0)
1512 err_msg_format = true;
1513 }
1514 break;
1515 case VIRTCHNL_OP_ADD_VLAN:
1516 case VIRTCHNL_OP_DEL_VLAN:
1517 valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
1518 if (msglen >= valid_len) {
1519 struct virtchnl_vlan_filter_list *vfl =
1520 (struct virtchnl_vlan_filter_list *)msg;
1521 valid_len = virtchnl_struct_size(vfl, vlan_id,
1522 vfl->num_elements);
1523 if (vfl->num_elements == 0)
1524 err_msg_format = true;
1525 }
1526 break;
1527 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1528 valid_len = sizeof(struct virtchnl_promisc_info);
1529 break;
1530 case VIRTCHNL_OP_GET_STATS:
1531 valid_len = sizeof(struct virtchnl_queue_select);
1532 break;
1533 case VIRTCHNL_OP_RDMA:
1534 /* These messages are opaque to us and will be validated in
1535 * the RDMA client code. We just need to check for nonzero
1536 * length. The firmware will enforce max length restrictions.
1537 */
1538 if (msglen)
1539 valid_len = msglen;
1540 else
1541 err_msg_format = true;
1542 break;
1543 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
1544 break;
1545 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
1546 valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
1547 if (msglen >= valid_len) {
1548 struct virtchnl_rdma_qvlist_info *qv =
1549 (struct virtchnl_rdma_qvlist_info *)msg;
1550
1551 valid_len = virtchnl_struct_size(qv, qv_info,
1552 qv->num_vectors);
1553 }
1554 break;
1555 case VIRTCHNL_OP_CONFIG_RSS_KEY:
1556 valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
1557 if (msglen >= valid_len) {
1558 struct virtchnl_rss_key *vrk =
1559 (struct virtchnl_rss_key *)msg;
1560 valid_len = virtchnl_struct_size(vrk, key,
1561 vrk->key_len);
1562 }
1563 break;
1564 case VIRTCHNL_OP_CONFIG_RSS_LUT:
1565 valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
1566 if (msglen >= valid_len) {
1567 struct virtchnl_rss_lut *vrl =
1568 (struct virtchnl_rss_lut *)msg;
1569 valid_len = virtchnl_struct_size(vrl, lut,
1570 vrl->lut_entries);
1571 }
1572 break;
1573 case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
1574 valid_len = sizeof(struct virtchnl_rss_hfunc);
1575 break;
1576 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
1577 break;
1578 case VIRTCHNL_OP_SET_RSS_HENA:
1579 valid_len = sizeof(struct virtchnl_rss_hena);
1580 break;
1581 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1582 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1583 break;
1584 case VIRTCHNL_OP_REQUEST_QUEUES:
1585 valid_len = sizeof(struct virtchnl_vf_res_request);
1586 break;
1587 case VIRTCHNL_OP_ENABLE_CHANNELS:
1588 valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
1589 if (msglen >= valid_len) {
1590 struct virtchnl_tc_info *vti =
1591 (struct virtchnl_tc_info *)msg;
1592 valid_len = virtchnl_struct_size(vti, list,
1593 vti->num_tc);
1594 if (vti->num_tc == 0)
1595 err_msg_format = true;
1596 }
1597 break;
1598 case VIRTCHNL_OP_DISABLE_CHANNELS:
1599 break;
1600 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
1601 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
1602 valid_len = sizeof(struct virtchnl_filter);
1603 break;
1604 case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
1605 break;
1606 case VIRTCHNL_OP_ADD_RSS_CFG:
1607 case VIRTCHNL_OP_DEL_RSS_CFG:
1608 valid_len = sizeof(struct virtchnl_rss_cfg);
1609 break;
1610 case VIRTCHNL_OP_ADD_FDIR_FILTER:
1611 valid_len = sizeof(struct virtchnl_fdir_add);
1612 break;
1613 case VIRTCHNL_OP_DEL_FDIR_FILTER:
1614 valid_len = sizeof(struct virtchnl_fdir_del);
1615 break;
1616 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
1617 break;
1618 case VIRTCHNL_OP_ADD_VLAN_V2:
1619 case VIRTCHNL_OP_DEL_VLAN_V2:
1620 valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
1621 if (msglen >= valid_len) {
1622 struct virtchnl_vlan_filter_list_v2 *vfl =
1623 (struct virtchnl_vlan_filter_list_v2 *)msg;
1624
1625 valid_len = virtchnl_struct_size(vfl, filters,
1626 vfl->num_elements);
1627
1628 if (vfl->num_elements == 0) {
1629 err_msg_format = true;
1630 break;
1631 }
1632 }
1633 break;
1634 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1635 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1636 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1637 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1638 valid_len = sizeof(struct virtchnl_vlan_setting);
1639 break;
1640 /* These are always errors coming from the VF. */
1641 case VIRTCHNL_OP_EVENT:
1642 case VIRTCHNL_OP_UNKNOWN:
1643 default:
1644 return VIRTCHNL_STATUS_ERR_PARAM;
1645 }
1646 /* few more checks */
1647 if (err_msg_format || valid_len != msglen)
1648 return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
1649
1650 return 0;
1651 }
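
/* Illustrative sketch: a PF driver would typically run every message
 * received from a VF through the validator before acting on it. "vf" is a
 * hypothetical per-VF context holding the negotiated version.
 *
 *   err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
 *   if (err)
 *           reply with v_retval = err and ignore the message;
 */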
1652 #endif /* _VIRTCHNL_H_ */
1653