1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2013-2022, Intel Corporation. */
3
4 #ifndef _VIRTCHNL_H_
5 #define _VIRTCHNL_H_
6
7 #include <linux/bitops.h>
8 #include <linux/bits.h>
9 #include <linux/overflow.h>
10 #include <uapi/linux/if_ether.h>
11
12 /* Description:
13 * This header file describes the Virtual Function (VF) - Physical Function
14 * (PF) communication protocol used by the drivers for all devices starting
15 * from our 40G product line
16 *
17 * Admin queue buffer usage:
18 * desc->opcode is always aqc_opc_send_msg_to_pf
19 * flags, retval, datalen, and data addr are all used normally.
20 * The Firmware copies the cookie fields when sending messages between the
21 * PF and VF, but uses all other fields internally. Due to this limitation,
22 * we must send all messages as "indirect", i.e. using an external buffer.
23 *
24 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
25 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
26 * have a maximum of sixteen queues for all of its VSIs.
27 *
28 * The PF is required to return a status code in v_retval for all messages
29 * except RESET_VF, which does not require any response. The returned value
30 * is of virtchnl_status_code type, defined here.
31 *
32 * In general, VF driver initialization should roughly follow the order of
33 * these opcodes. The VF driver must first validate the API version of the
34 * PF driver, then request a reset, then get resources, then configure
35 * queues and interrupts. After these operations are complete, the VF
36 * driver may start its queues, optionally add MAC and VLAN filters, and
37 * process traffic.
38 */
39
40 /* START GENERIC DEFINES
41 * Need to ensure the following enums and defines hold the same meaning and
42 * value in current and future projects
43 */
44
45 /* Error Codes */
46 enum virtchnl_status_code {
47 VIRTCHNL_STATUS_SUCCESS = 0,
48 VIRTCHNL_STATUS_ERR_PARAM = -5,
49 VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
50 VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
51 VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
52 VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
53 VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
54 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
55 };
56
57 /* Backward compatibility */
58 #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
59 #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
60
61 #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
62 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
63 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
64 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
65 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
66 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
67 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
68 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
69
70 enum virtchnl_link_speed {
71 VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
72 VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
73 VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
74 VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
75 VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
76 VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
77 VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
78 VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
79 VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
80 };
81
82 /* for hsplit_0 field of Rx HMC context */
83 /* deprecated with AVF 1.0 */
84 enum virtchnl_rx_hsplit {
85 VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
86 VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
87 VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
88 VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
89 VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
90 };
91
92 enum virtchnl_bw_limit_type {
93 VIRTCHNL_BW_SHAPER = 0,
94 };
95 /* END GENERIC DEFINES */
96
97 /* Opcodes for VF-PF communication. These are placed in the v_opcode field
98 * of the virtchnl_msg structure.
99 */
100 enum virtchnl_ops {
101 /* The PF sends status change events to VFs using
102 * the VIRTCHNL_OP_EVENT opcode.
103 * VFs send requests to the PF using the other ops.
104 * Use of "advanced opcode" features must be negotiated as part of the
105 * capabilities exchange and is not considered part of the base mode feature set.
106 */
107 VIRTCHNL_OP_UNKNOWN = 0,
108 VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
109 VIRTCHNL_OP_RESET_VF = 2,
110 VIRTCHNL_OP_GET_VF_RESOURCES = 3,
111 VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
112 VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
113 VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
114 VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
115 VIRTCHNL_OP_ENABLE_QUEUES = 8,
116 VIRTCHNL_OP_DISABLE_QUEUES = 9,
117 VIRTCHNL_OP_ADD_ETH_ADDR = 10,
118 VIRTCHNL_OP_DEL_ETH_ADDR = 11,
119 VIRTCHNL_OP_ADD_VLAN = 12,
120 VIRTCHNL_OP_DEL_VLAN = 13,
121 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
122 VIRTCHNL_OP_GET_STATS = 15,
123 VIRTCHNL_OP_RSVD = 16,
124 VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
125 VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
126 /* opcode 19 is reserved */
127 VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
128 VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
129 VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
130 VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
131 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
132 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
133 VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
134 VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
135 VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
136 VIRTCHNL_OP_SET_RSS_HENA = 26,
137 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
138 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
139 VIRTCHNL_OP_REQUEST_QUEUES = 29,
140 VIRTCHNL_OP_ENABLE_CHANNELS = 30,
141 VIRTCHNL_OP_DISABLE_CHANNELS = 31,
142 VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
143 VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
144 /* opcodes 34 - 43 are reserved */
145 VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
146 VIRTCHNL_OP_ADD_RSS_CFG = 45,
147 VIRTCHNL_OP_DEL_RSS_CFG = 46,
148 VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
149 VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
150 VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
151 VIRTCHNL_OP_ADD_VLAN_V2 = 52,
152 VIRTCHNL_OP_DEL_VLAN_V2 = 53,
153 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
154 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
155 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
156 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
157 /* opcodes 58 - 65 are reserved */
158 VIRTCHNL_OP_GET_QOS_CAPS = 66,
159 /* opcodes 68 through 111 are reserved */
160 VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
161 VIRTCHNL_OP_CONFIG_QUANTA = 113,
162 VIRTCHNL_OP_MAX,
163 };
164
165 /* These macros are used to generate compilation errors if a structure/union
166 * is not exactly the correct length. It gives a divide by zero error if the
167 * structure/union is not of the correct size, otherwise it creates an enum
168 * that is never used.
169 */
170 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
171 { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
172 #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
173 { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
174
175 /* Message descriptions and data structures. */
176
177 /* VIRTCHNL_OP_VERSION
178 * VF posts its version number to the PF. PF responds with its version number
179 * in the same format, along with a return code.
180 * Reply from PF has its major/minor versions also in param0 and param1.
181 * If there is a major version mismatch, then the VF cannot operate.
182 * If there is a minor version mismatch, then the VF can operate but should
183 * add a warning to the system log.
184 *
185 * This enum element MUST always be specified as == 1, regardless of other
186 * changes in the API. The PF must always respond to this message without
187 * error regardless of version mismatch.
188 */
189 #define VIRTCHNL_VERSION_MAJOR 1
190 #define VIRTCHNL_VERSION_MINOR 1
191 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
192
193 struct virtchnl_version_info {
194 u32 major;
195 u32 minor;
196 };
197
198 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
199
200 #define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
201 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
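
/* As an illustrative sketch (not part of this API), a VF driver begins the
 * handshake by sending its own version in a VIRTCHNL_OP_VERSION message and
 * then comparing the PF's reply; "reply" and the transport used to send the
 * message are assumptions of this sketch and depend on the VF driver:
 *
 *	struct virtchnl_version_info vvi = {
 *		.major = VIRTCHNL_VERSION_MAJOR,
 *		.minor = VIRTCHNL_VERSION_MINOR,
 *	};
 *
 *	// send VIRTCHNL_OP_VERSION with &vvi, sizeof(vvi); then, on reply:
 *	if (reply.major != VIRTCHNL_VERSION_MAJOR)
 *		return -EOPNOTSUPP;	// major mismatch: VF cannot operate
 *	if (reply.minor != VIRTCHNL_VERSION_MINOR)
 *		pr_warn("PF/VF minor version mismatch\n");	// VF can still operate
 */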
202
203 /* VIRTCHNL_OP_RESET_VF
204 * VF sends this request to PF with no parameters
205 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
206 * until reset completion is indicated. The admin queue must be reinitialized
207 * after this operation.
208 *
209 * When reset is complete, PF must ensure that all queues in all VSIs associated
210 * with the VF are stopped, all queue configurations in the HMC are set to 0,
211 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
212 * are cleared.
213 */
214
215 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
216 * vsi_type should always be 6 for backward compatibility. Add other fields
217 * as needed.
218 */
219 enum virtchnl_vsi_type {
220 VIRTCHNL_VSI_TYPE_INVALID = 0,
221 VIRTCHNL_VSI_SRIOV = 6,
222 };
223
224 /* VIRTCHNL_OP_GET_VF_RESOURCES
225 * Version 1.0 VF sends this request to PF with no parameters
226 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
227 * PF responds with an indirect message containing
228 * virtchnl_vf_resource and one or more
229 * virtchnl_vsi_resource structures.
230 */
231
232 struct virtchnl_vsi_resource {
233 u16 vsi_id;
234 u16 num_queue_pairs;
235
236 /* see enum virtchnl_vsi_type */
237 s32 vsi_type;
238 u16 qset_handle;
239 u8 default_mac_addr[ETH_ALEN];
240 };
241
242 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
243
244 /* VF capability flags
245 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
246 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
247 */
248 #define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
249 #define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
250 #define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
251 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
252 #define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
253 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
254 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
255 /* used to negotiate communicating link speeds in Mbps */
256 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
257 #define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
258 #define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11)
259 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
260 #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
261 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
262 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
263 #define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
264 #define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
265 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
266 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
267 #define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
268 #define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
269 #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
270 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
271 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
272 #define VIRTCHNL_VF_OFFLOAD_QOS BIT(29)
273
274 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
275 VIRTCHNL_VF_OFFLOAD_VLAN | \
276 VIRTCHNL_VF_OFFLOAD_RSS_PF)
277
278 struct virtchnl_vf_resource {
279 u16 num_vsis;
280 u16 num_queue_pairs;
281 u16 max_vectors;
282 u16 max_mtu;
283
284 u32 vf_cap_flags;
285 u32 rss_key_size;
286 u32 rss_lut_size;
287
288 struct virtchnl_vsi_resource vsi_res[];
289 };
290
291 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
292 #define virtchnl_vf_resource_LEGACY_SIZEOF 36
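
/* Illustrative sketch only: a version 1.1 VF requests resources by sending a
 * u32 bitmap of the capabilities it wishes to negotiate, for example:
 *
 *	u32 caps = VIRTCHNL_VF_OFFLOAD_L2 |
 *		   VIRTCHNL_VF_OFFLOAD_RSS_PF |
 *		   VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 *		   VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
 *
 *	// send VIRTCHNL_OP_GET_VF_RESOURCES with &caps, sizeof(caps)
 *
 * The PF replies with a virtchnl_vf_resource structure whose vf_cap_flags
 * field holds the subset of the requested capabilities that were granted,
 * followed by num_vsis virtchnl_vsi_resource entries in vsi_res[].
 */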
293
294 /* VIRTCHNL_OP_CONFIG_TX_QUEUE
295 * VF sends this message to set up parameters for one TX queue.
296 * External data buffer contains one instance of virtchnl_txq_info.
297 * PF configures requested queue and returns a status code.
298 */
299
300 /* Tx queue config info */
301 struct virtchnl_txq_info {
302 u16 vsi_id;
303 u16 queue_id;
304 u16 ring_len; /* number of descriptors, multiple of 8 */
305 u16 headwb_enabled; /* deprecated with AVF 1.0 */
306 u64 dma_ring_addr;
307 u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
308 };
309
310 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
311
312 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
313 * VF sends this message to set up parameters for one RX queue.
314 * External data buffer contains one instance of virtchnl_rxq_info.
315 * PF configures requested queue and returns a status code. The
316 * crc_disable flag disables CRC stripping on the VF. Setting
317 * the crc_disable flag to 1 will disable CRC stripping for each
318 * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
319 * offload must have been set prior to sending this info or the PF
320 * will ignore the request. This flag should be set the same for
321 * all of the queues for a VF.
322 */
323
324 /* Rx queue config info */
325 struct virtchnl_rxq_info {
326 u16 vsi_id;
327 u16 queue_id;
328 u32 ring_len; /* number of descriptors, multiple of 32 */
329 u16 hdr_size;
330 u16 splithdr_enabled; /* deprecated with AVF 1.0 */
331 u32 databuffer_size;
332 u32 max_pkt_size;
333 u8 crc_disable;
334 u8 rxdid;
335 u8 pad1[2];
336 u64 dma_ring_addr;
337
338 /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
339 s32 rx_split_pos;
340 u32 pad2;
341 };
342
343 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
344
345 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES
346 * VF sends this message to set parameters for all active TX and RX queues
347 * associated with the specified VSI.
348 * PF configures queues and returns status.
349 * If the number of queues specified is greater than the number of queues
350 * associated with the VSI, an error is returned and no queues are configured.
351 * NOTE: The VF is not required to configure all queues in a single request.
352 * It may send multiple messages. PF drivers must correctly handle all VF
353 * requests.
354 */
355 struct virtchnl_queue_pair_info {
356 /* NOTE: vsi_id and queue_id should be identical for both queues. */
357 struct virtchnl_txq_info txq;
358 struct virtchnl_rxq_info rxq;
359 };
360
361 VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
362
363 struct virtchnl_vsi_queue_config_info {
364 u16 vsi_id;
365 u16 num_queue_pairs;
366 u32 pad;
367 struct virtchnl_queue_pair_info qpair[];
368 };
369
370 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
371 #define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72
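
/* A minimal sketch of building this message; helpers other than those defined
 * in this header (allocation, message send) are assumptions:
 *
 *	struct virtchnl_vsi_queue_config_info *vqc;
 *	size_t len = virtchnl_struct_size(vqc, qpair, num_queue_pairs);
 *
 *	vqc = kzalloc(len, GFP_KERNEL);
 *	vqc->vsi_id = vsi_id;
 *	vqc->num_queue_pairs = num_queue_pairs;
 *	for (i = 0; i < num_queue_pairs; i++) {
 *		vqc->qpair[i].txq.vsi_id = vsi_id;
 *		vqc->qpair[i].txq.queue_id = i;
 *		vqc->qpair[i].rxq.vsi_id = vsi_id;
 *		vqc->qpair[i].rxq.queue_id = i;
 *		// ring lengths, DMA addresses, buffer sizes, etc. go here
 *	}
 *	// send VIRTCHNL_OP_CONFIG_VSI_QUEUES with vqc, len
 *
 * virtchnl_struct_size() is defined at the end of this header and accounts
 * for the legacy sizing of the older one-element-array definitions.
 */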
372
373 /* VIRTCHNL_OP_REQUEST_QUEUES
374 * VF sends this message to request the PF to allocate additional queues to
375 * this VF. Each VF gets a guaranteed number of queues on init but asking for
376 * additional queues must be negotiated. This is a best effort request as it
377 * is possible the PF does not have enough queues left to support the request.
378 * If the PF cannot support the number requested it will respond with the
379 * maximum number it is able to support. If the request is successful, PF will
380 * then reset the VF to institute required changes.
381 */
382
383 /* VF resource request */
384 struct virtchnl_vf_res_request {
385 u16 num_queue_pairs;
386 };
387
388 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
389 * VF uses this message to map vectors to queues.
390 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
391 * are to be associated with the specified vector.
392 * The "other" causes are always mapped to vector 0. The VF may not request
393 * that vector 0 be used for traffic.
394 * PF configures interrupt mapping and returns status.
395 * NOTE: due to hardware requirements, all active queues (both TX and RX)
396 * should be mapped to interrupts, even if the driver intends to operate
397 * only in polling mode. In this case the interrupt may be disabled, but
398 * the ITR timer will still run to trigger writebacks.
399 */
400 struct virtchnl_vector_map {
401 u16 vsi_id;
402 u16 vector_id;
403 u16 rxq_map;
404 u16 txq_map;
405 u16 rxitr_idx;
406 u16 txitr_idx;
407 };
408
409 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
410
411 struct virtchnl_irq_map_info {
412 u16 num_vectors;
413 struct virtchnl_vector_map vecmap[];
414 };
415
416 VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
417 #define virtchnl_irq_map_info_LEGACY_SIZEOF 14
418
419 /* VIRTCHNL_OP_ENABLE_QUEUES
420 * VIRTCHNL_OP_DISABLE_QUEUES
421 * VF sends these messages to enable or disable TX/RX queue pairs.
422 * The queues fields are bitmaps indicating which queues to act upon.
423 * (Currently, we only support 16 queues per VF, but we make the field
424 * u32 to allow for expansion.)
425 * PF performs requested action and returns status.
426 * NOTE: The VF is not required to enable/disable all queues in a single
427 * request. It may send multiple messages.
428 * PF drivers must correctly handle all VF requests.
429 */
430 struct virtchnl_queue_select {
431 u16 vsi_id;
432 u16 pad;
433 u32 rx_queues;
434 u32 tx_queues;
435 };
436
437 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
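
/* For example (sketch only), to enable the first four Tx/Rx queue pairs of a
 * VSI the VF would send VIRTCHNL_OP_ENABLE_QUEUES with:
 *
 *	struct virtchnl_queue_select vqs = {
 *		.vsi_id = vsi_id,
 *		.rx_queues = 0xF,	// bitmap selecting queues 0-3
 *		.tx_queues = 0xF,
 *	};
 */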
438
439 /* VIRTCHNL_OP_ADD_ETH_ADDR
440 * VF sends this message in order to add one or more unicast or multicast
441 * address filters for the specified VSI.
442 * PF adds the filters and returns status.
443 */
444
445 /* VIRTCHNL_OP_DEL_ETH_ADDR
446 * VF sends this message in order to remove one or more unicast or multicast
447 * filters for the specified VSI.
448 * PF removes the filters and returns status.
449 */
450
451 /* VIRTCHNL_ETHER_ADDR_LEGACY
452 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
453 * bytes. Moving forward all VF drivers should not set type to
454 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
455 * behavior. The control plane function (i.e. PF) can use a best effort method
456 * of tracking the primary/device unicast in this case, but there is no
457 * guarantee and functionality depends on the implementation of the PF.
458 */
459
460 /* VIRTCHNL_ETHER_ADDR_PRIMARY
461 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
462 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
463 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
464 * function (i.e. PF) to accurately track and use this MAC address for
465 * displaying on the host and for VM/function reset.
466 */
467
468 /* VIRTCHNL_ETHER_ADDR_EXTRA
469 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
470 * unicast and/or multicast filters that are being added/deleted via
471 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
472 */
473 struct virtchnl_ether_addr {
474 u8 addr[ETH_ALEN];
475 u8 type;
476 #define VIRTCHNL_ETHER_ADDR_LEGACY 0
477 #define VIRTCHNL_ETHER_ADDR_PRIMARY 1
478 #define VIRTCHNL_ETHER_ADDR_EXTRA 2
479 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
480 u8 pad;
481 };
482
483 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
484
485 struct virtchnl_ether_addr_list {
486 u16 vsi_id;
487 u16 num_elements;
488 struct virtchnl_ether_addr list[];
489 };
490
491 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
492 #define virtchnl_ether_addr_list_LEGACY_SIZEOF 12
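
/* A sketch of adding the primary unicast MAC filter; names not defined in this
 * header (allocation, the MAC source, message send) are assumptions:
 *
 *	struct virtchnl_ether_addr_list *veal;
 *	size_t len = virtchnl_struct_size(veal, list, 1);
 *
 *	veal = kzalloc(len, GFP_KERNEL);
 *	veal->vsi_id = vsi_id;
 *	veal->num_elements = 1;
 *	memcpy(veal->list[0].addr, mac_addr, ETH_ALEN);
 *	veal->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
 *	// send VIRTCHNL_OP_ADD_ETH_ADDR with veal, len
 */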
493
494 /* VIRTCHNL_OP_ADD_VLAN
495 * VF sends this message to add one or more VLAN tag filters for receives.
496 * PF adds the filters and returns status.
497 * If a port VLAN is configured by the PF, this operation will return an
498 * error to the VF.
499 */
500
501 /* VIRTCHNL_OP_DEL_VLAN
502 * VF sends this message to remove one or more VLAN tag filters for receives.
503 * PF removes the filters and returns status.
504 * If a port VLAN is configured by the PF, this operation will return an
505 * error to the VF.
506 */
507
508 struct virtchnl_vlan_filter_list {
509 u16 vsi_id;
510 u16 num_elements;
511 u16 vlan_id[];
512 };
513
514 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
515 #define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6
516
517 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
518 * structures and opcodes.
519 *
520 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
521 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
522 *
523 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
524 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
525 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
526 *
527 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
528 * by the PF concurrently. For example, if the PF can support
529 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
530 * would OR the following bits:
531 *
532 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
533 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
534 * VIRTCHNL_VLAN_ETHERTYPE_AND;
535 *
536 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
537 * and 0x88A8 VLAN ethertypes.
538 *
539 * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be supported
540 * by the PF concurrently. For example if the PF can support
541 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
542 * offload it would OR the following bits:
543 *
544 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
545 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
546 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
547 *
548 * The VF would interpret this as VLAN stripping can be supported on either
549 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
550 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
551 * the previously set value.
552 *
553 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
554 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
555 *
556 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
557 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
558 *
559 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
560 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
561 *
562 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
563 * VLAN filtering if the underlying PF supports it.
564 *
565 * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
566 * certain VLAN capability can be toggled. For example if the underlying PF/CP
567 * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
568 * set this bit along with the supported ethertypes.
569 */
570 enum virtchnl_vlan_support {
571 VIRTCHNL_VLAN_UNSUPPORTED = 0,
572 VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
573 VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
574 VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
575 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
576 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
577 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
578 VIRTCHNL_VLAN_PRIO = BIT(24),
579 VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
580 VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
581 VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
582 VIRTCHNL_VLAN_TOGGLE = BIT(31),
583 };
584
585 /* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
586 * for filtering, insertion, and stripping capabilities.
587 *
588 * If only outer capabilities are supported (for filtering, insertion, and/or
589 * stripping) then this refers to the outer most or single VLAN from the VF's
590 * perspective.
591 *
592 * If only inner capabilities are supported (for filtering, insertion, and/or
593 * stripping) then this refers to the outer most or single VLAN from the VF's
594 * perspective. Functionally this is the same as if only outer capabilities are
595 * supported. The VF driver is just forced to use the inner fields when
596 * adding/deleting filters and enabling/disabling offloads (if supported).
597 *
598 * If both outer and inner capabilities are supported (for filtering, insertion,
599 * and/or stripping) then outer refers to the outer most or single VLAN and
600 * inner refers to the second VLAN, if it exists, in the packet.
601 *
602 * There is no support for tunneled VLAN offloads, so outer or inner are never
603 * referring to a tunneled packet from the VF's perspective.
604 */
605 struct virtchnl_vlan_supported_caps {
606 u32 outer;
607 u32 inner;
608 };
609
610 /* The PF populates these fields based on the supported VLAN filtering. If a
611 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
612 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
613 * the unsupported fields.
614 *
615 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
616 * VIRTCHNL_VLAN_TOGGLE bit is set.
617 *
618 * The ethertype(s) specified in the ethertype_init field are the ethertypes
619 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
620 * most VLAN from the VF's perspective. If both inner and outer filtering are
621 * allowed then ethertype_init only refers to the outer most VLAN, as the only
622 * VLAN ethertype supported for inner VLAN filtering is
623 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
624 * when both inner and outer filtering are allowed.
625 *
626 * The max_filters field tells the VF how many VLAN filters it's allowed to have
627 * at any one time. If it exceeds this amount and tries to add another filter,
628 * then the request will be rejected by the PF. To prevent failures, the VF
629 * should keep track of how many VLAN filters it has added and not attempt to
630 * add more than max_filters.
631 */
632 struct virtchnl_vlan_filtering_caps {
633 struct virtchnl_vlan_supported_caps filtering_support;
634 u32 ethertype_init;
635 u16 max_filters;
636 u8 pad[2];
637 };
638
639 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
640
641 /* This enum is used for the virtchnl_vlan_offload_caps structure to specify
642 * if the PF supports a different ethertype for stripping and insertion.
643 *
644 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
645 * for stripping affect the ethertype(s) specified for insertion and vice versa
646 * as well. If the VF tries to configure VLAN stripping via
647 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
648 * that will be the ethertype for both stripping and insertion.
649 *
650 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
651 * stripping do not affect the ethertype(s) specified for insertion and vice
652 * versa.
653 */
654 enum virtchnl_vlan_ethertype_match {
655 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
656 VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
657 };
658
659 /* The PF populates these fields based on the supported VLAN offloads. If a
660 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
661 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
662 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
663 *
664 * Also, a VF is only allowed to toggle its VLAN offload setting if the
665 * VIRTCHNL_VLAN_TOGGLE bit is set.
666 *
667 * The VF driver needs to be aware of how the tags are stripped by hardware and
668 * inserted by the VF driver based on the level of offload support. The PF will
669 * populate these fields based on where the VLAN tags are expected to be
670 * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
671 * interpret these fields. See the definition of the
672 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
673 * enumeration.
674 */
675 struct virtchnl_vlan_offload_caps {
676 struct virtchnl_vlan_supported_caps stripping_support;
677 struct virtchnl_vlan_supported_caps insertion_support;
678 u32 ethertype_init;
679 u8 ethertype_match;
680 u8 pad[3];
681 };
682
683 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
684
685 /* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
686 * VF sends this message to determine its VLAN capabilities.
687 *
688 * PF will mark which capabilities it supports based on hardware support and
689 * current configuration. For example, if a port VLAN is configured the PF will
690 * not allow outer VLAN filtering, stripping, or insertion to be configured so
691 * it will block these features from the VF.
692 *
693 * The VF will need to cross-reference its capabilities with the PF's
694 * capabilities in the response message from the PF to determine the VLAN
695 * support.
696 */
697 struct virtchnl_vlan_caps {
698 struct virtchnl_vlan_filtering_caps filtering;
699 struct virtchnl_vlan_offload_caps offloads;
700 };
701
702 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
703
704 struct virtchnl_vlan {
705 u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
706 u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
707 * filtering caps
708 */
709 u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
710 * filtering caps. Note that tpid here does not refer to
711 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
712 * actual 2-byte VLAN TPID
713 */
714 u8 pad[2];
715 };
716
717 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
718
719 struct virtchnl_vlan_filter {
720 struct virtchnl_vlan inner;
721 struct virtchnl_vlan outer;
722 u8 pad[16];
723 };
724
725 VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
726
727 /* VIRTCHNL_OP_ADD_VLAN_V2
728 * VIRTCHNL_OP_DEL_VLAN_V2
729 *
730 * VF sends these messages to add/del one or more VLAN tag filters for Rx
731 * traffic.
732 *
733 * The PF attempts to add the filters and returns status.
734 *
735 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
736 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
737 */
738 struct virtchnl_vlan_filter_list_v2 {
739 u16 vport_id;
740 u16 num_elements;
741 u8 pad[4];
742 struct virtchnl_vlan_filter filters[];
743 };
744
745 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
746 #define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
747
748 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
749 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
750 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
751 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
752 *
753 * VF sends this message to enable or disable VLAN stripping or insertion. It
754 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
755 * allowed and whether or not it's allowed to enable/disable the specific
756 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
757 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
758 * messages are allowed.
759 *
760 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
761 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
762 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
763 * case means the outer most or single VLAN from the VF's perspective. This is
764 * because no outer offloads are supported. See the comments above the
765 * virtchnl_vlan_supported_caps structure for more details.
766 *
767 * virtchnl_vlan_caps.offloads.stripping_support.inner =
768 * VIRTCHNL_VLAN_TOGGLE |
769 * VIRTCHNL_VLAN_ETHERTYPE_8100;
770 *
771 * virtchnl_vlan_caps.offloads.insertion_support.inner =
772 * VIRTCHNL_VLAN_TOGGLE |
773 * VIRTCHNL_VLAN_ETHERTYPE_8100;
774 *
775 * In order to enable inner (again note that in this case inner is the outer
776 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
777 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
778 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
779 *
780 * virtchnl_vlan_setting.inner_ethertype_setting =
781 * VIRTCHNL_VLAN_ETHERTYPE_8100;
782 *
783 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
784 * initialization.
785 *
786 * The reason that VLAN TPID(s) are not being used for the
787 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
788 * possible a device could support VLAN insertion and/or stripping offload on
789 * multiple ethertypes concurrently, so this method allows a VF to request
790 * multiple ethertypes in one message using the virtchnl_vlan_support
791 * enumeration.
792 *
793 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
794 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
795 * VLAN insertion and stripping simultaneously. The
796 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
797 * populated based on what the PF can support.
798 *
799 * virtchnl_vlan_caps.offloads.stripping_support.outer =
800 * VIRTCHNL_VLAN_TOGGLE |
801 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
802 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
803 * VIRTCHNL_VLAN_ETHERTYPE_AND;
804 *
805 * virtchnl_vlan_caps.offloads.insertion_support.outer =
806 * VIRTCHNL_VLAN_TOGGLE |
807 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
808 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
809 * VIRTCHNL_VLAN_ETHERTYPE_AND;
810 *
811 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
812 * would populate the virtchnl_vlan_setting structure in the following manner
813 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
814 *
815 * virtchnl_vlan_setting.outer_ethertype_setting =
816 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
817 * VIRTCHNL_VLAN_ETHERTYPE_88A8;
818 *
819 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
820 * initialization.
821 *
822 * There is also the case where a PF and the underlying hardware can support
823 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
824 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
825 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
826 * offloads. The ethertypes must match for stripping and insertion.
827 *
828 * virtchnl_vlan_caps.offloads.stripping_support.outer =
829 * VIRTCHNL_VLAN_TOGGLE |
830 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
831 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
832 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
833 *
834 * virtchnl_vlan_caps.offloads.insertion_support.outer =
835 * VIRTCHNL_VLAN_TOGGLE |
836 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
837 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
838 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
839 *
840 * virtchnl_vlan_caps.offloads.ethertype_match =
841 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
842 *
843 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
844 * populate the virtchnl_vlan_setting structure in the following manner and send
845 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
846 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
847 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
848 *
849 * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
850 *
851 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
852 * initialization.
853 */
854 struct virtchnl_vlan_setting {
855 u32 outer_ethertype_setting;
856 u32 inner_ethertype_setting;
857 u16 vport_id;
858 u8 pad[6];
859 };
860
861 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
862
863 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
864 * VF sends VSI id and flags.
865 * PF returns status code in retval.
866 * Note: we assume that broadcast accept mode is always enabled.
867 */
868 struct virtchnl_promisc_info {
869 u16 vsi_id;
870 u16 flags;
871 };
872
873 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
874
875 #define FLAG_VF_UNICAST_PROMISC 0x00000001
876 #define FLAG_VF_MULTICAST_PROMISC 0x00000002
877
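/* For example (sketch only), to enable unicast and multicast promiscuous mode
 * on a VSI the VF would send VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE with:
 *
 *	struct virtchnl_promisc_info vpi = {
 *		.vsi_id = vsi_id,
 *		.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC,
 *	};
 *
 * Sending the message again with both flags cleared returns the VSI to
 * filtered (non-promiscuous) reception; broadcast remains enabled.
 */
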
878 /* VIRTCHNL_OP_GET_STATS
879 * VF sends this message to request stats for the selected VSI. VF uses
880 * the virtchnl_queue_select struct to specify the VSI. The queue_id
881 * field is ignored by the PF.
882 *
883 * PF replies with struct eth_stats in an external buffer.
884 */
885
886 /* VIRTCHNL_OP_CONFIG_RSS_KEY
887 * VIRTCHNL_OP_CONFIG_RSS_LUT
888 * VF sends these messages to configure RSS. Only supported if both PF
889 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
890 * configuration negotiation. If this is the case, then the RSS fields in
891 * the VF resource struct are valid.
892 * Both the key and LUT are initialized to 0 by the PF, meaning that
893 * RSS is effectively disabled until set up by the VF.
894 */
895 struct virtchnl_rss_key {
896 u16 vsi_id;
897 u16 key_len;
898 u8 key[]; /* RSS hash key, packed bytes */
899 };
900
901 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
902 #define virtchnl_rss_key_LEGACY_SIZEOF 6
903
904 struct virtchnl_rss_lut {
905 u16 vsi_id;
906 u16 lut_entries;
907 u8 lut[]; /* RSS lookup table */
908 };
909
910 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
911 #define virtchnl_rss_lut_LEGACY_SIZEOF 6
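
/* A sketch of configuring the RSS key (the LUT is configured the same way via
 * VIRTCHNL_OP_CONFIG_RSS_LUT); helpers not defined in this header are
 * assumptions:
 *
 *	struct virtchnl_rss_key *vrk;
 *	size_t len = virtchnl_struct_size(vrk, key, key_len);
 *
 *	vrk = kzalloc(len, GFP_KERNEL);
 *	vrk->vsi_id = vsi_id;
 *	vrk->key_len = key_len;		// typically vf_res->rss_key_size
 *	memcpy(vrk->key, rss_key, key_len);
 *	// send VIRTCHNL_OP_CONFIG_RSS_KEY with vrk, len
 */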
912
913 /* VIRTCHNL_OP_GET_RSS_HENA_CAPS
914 * VIRTCHNL_OP_SET_RSS_HENA
915 * VF sends these messages to get and set the hash filter enable bits for RSS.
916 * By default, the PF sets these to all possible traffic types that the
917 * hardware supports. The VF can query this value if it wants to change the
918 * traffic types that are hashed by the hardware.
919 */
920 struct virtchnl_rss_hena {
921 u64 hena;
922 };
923
924 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
925
926 /* Type of RSS algorithm */
927 enum virtchnl_rss_algorithm {
928 VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
929 VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
930 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
931 VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
932 };
933
934 /* VIRTCHNL_OP_CONFIG_RSS_HFUNC
935 * VF sends this message to configure the RSS hash function. Only supported
936 * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
937 * configuration negotiation.
938 * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
939 * by the PF.
940 */
941 struct virtchnl_rss_hfunc {
942 u16 vsi_id;
943 u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
944 u32 reserved;
945 };
946
947 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
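
/* For example (sketch only), a VF requesting symmetric Toeplitz hashing would
 * send VIRTCHNL_OP_CONFIG_RSS_HFUNC with:
 *
 *	struct virtchnl_rss_hfunc vrh = {
 *		.vsi_id = vsi_id,
 *		.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC,
 *	};
 */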
948
949 /* VIRTCHNL_OP_ENABLE_CHANNELS
950 * VIRTCHNL_OP_DISABLE_CHANNELS
951 * VF sends these messages to enable or disable channels based on
952 * the user specified queue count and queue offset for each traffic class.
953 * This struct encompasses all the information that the PF needs from
954 * VF to create a channel.
955 */
956 struct virtchnl_channel_info {
957 u16 count; /* number of queues in a channel */
958 u16 offset; /* queues in a channel start from 'offset' */
959 u32 pad;
960 u64 max_tx_rate;
961 };
962
963 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
964
965 struct virtchnl_tc_info {
966 u32 num_tc;
967 u32 pad;
968 struct virtchnl_channel_info list[];
969 };
970
971 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
972 #define virtchnl_tc_info_LEGACY_SIZEOF 24
973
974 /* VIRTCHNL_ADD_CLOUD_FILTER
975 * VIRTCHNL_DEL_CLOUD_FILTER
976 * VF sends these messages to add or delete a cloud filter based on the
977 * user specified match and action filters. These structures encompass
978 * all the information that the PF needs from the VF to add/delete a
979 * cloud filter.
980 */
981
982 struct virtchnl_l4_spec {
983 u8 src_mac[ETH_ALEN];
984 u8 dst_mac[ETH_ALEN];
985 __be16 vlan_id;
986 __be16 pad; /* reserved for future use */
987 __be32 src_ip[4];
988 __be32 dst_ip[4];
989 __be16 src_port;
990 __be16 dst_port;
991 };
992
993 VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
994
995 union virtchnl_flow_spec {
996 struct virtchnl_l4_spec tcp_spec;
997 u8 buffer[128]; /* reserved for future use */
998 };
999
1000 VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
1001
1002 enum virtchnl_action {
1003 /* action types */
1004 VIRTCHNL_ACTION_DROP = 0,
1005 VIRTCHNL_ACTION_TC_REDIRECT,
1006 VIRTCHNL_ACTION_PASSTHRU,
1007 VIRTCHNL_ACTION_QUEUE,
1008 VIRTCHNL_ACTION_Q_REGION,
1009 VIRTCHNL_ACTION_MARK,
1010 VIRTCHNL_ACTION_COUNT,
1011 };
1012
1013 enum virtchnl_flow_type {
1014 /* flow types */
1015 VIRTCHNL_TCP_V4_FLOW = 0,
1016 VIRTCHNL_TCP_V6_FLOW,
1017 };
1018
1019 struct virtchnl_filter {
1020 union virtchnl_flow_spec data;
1021 union virtchnl_flow_spec mask;
1022
1023 /* see enum virtchnl_flow_type */
1024 s32 flow_type;
1025
1026 /* see enum virtchnl_action */
1027 s32 action;
1028 u32 action_meta;
1029 u8 field_flags;
1030 u8 pad[3];
1031 };
1032
1033 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
1034
1035 struct virtchnl_supported_rxdids {
1036 u64 supported_rxdids;
1037 };
1038
1039 /* VIRTCHNL_OP_EVENT
1040 * PF sends this message to inform the VF driver of events that may affect it.
1041 * No direct response is expected from the VF, though it may generate other
1042 * messages in response to this one.
1043 */
1044 enum virtchnl_event_codes {
1045 VIRTCHNL_EVENT_UNKNOWN = 0,
1046 VIRTCHNL_EVENT_LINK_CHANGE,
1047 VIRTCHNL_EVENT_RESET_IMPENDING,
1048 VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
1049 };
1050
1051 #define PF_EVENT_SEVERITY_INFO 0
1052 #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
1053
1054 struct virtchnl_pf_event {
1055 /* see enum virtchnl_event_codes */
1056 s32 event;
1057 union {
1058 /* If the PF driver does not support the new speed reporting
1059 * capabilities then use link_event else use link_event_adv to
1060 * get the speed and link information. The ability to understand
1061 * new speeds is indicated by setting the capability flag
1062 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
1063 * in virtchnl_vf_resource struct and can be used to determine
1064 * which link event struct to use below.
1065 */
1066 struct {
1067 enum virtchnl_link_speed link_speed;
1068 bool link_status;
1069 u8 pad[3];
1070 } link_event;
1071 struct {
1072 /* link_speed provided in Mbps */
1073 u32 link_speed;
1074 u8 link_status;
1075 u8 pad[3];
1076 } link_event_adv;
1077 } event_data;
1078
1079 s32 severity;
1080 };
1081
1082 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
1083
1084 /* used to specify if a ceq_idx or aeq_idx is invalid */
1085 #define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
1086 /* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
1087 * VF uses this message to request PF to map RDMA vectors to RDMA queues.
1088 * The request for this originates from the VF RDMA driver through
1089 * a client interface between VF LAN and VF RDMA driver.
1090 * A vector could have an AEQ and CEQ attached to it, although
1091 * there is a single AEQ per VF RDMA instance, in which case
1092 * most vectors will have VIRTCHNL_RDMA_INVALID_QUEUE_IDX for the aeq and a
1093 * valid idx for their ceqs. There will never be a case where multiple CEQs are
1094 * attached to a single vector.
1095 * PF configures interrupt mapping and returns status.
1096 */
1097
1098 struct virtchnl_rdma_qv_info {
1099 u32 v_idx; /* msix_vector */
1100 u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1101 u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1102 u8 itr_idx;
1103 u8 pad[3];
1104 };
1105
1106 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
1107
1108 struct virtchnl_rdma_qvlist_info {
1109 u32 num_vectors;
1110 struct virtchnl_rdma_qv_info qv_info[];
1111 };
1112
1113 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
1114 #define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
1115
1116 /* VF reset states - these are written into the RSTAT register:
1117 * VFGEN_RSTAT on the VF
1118 * When the PF initiates a reset, it writes 0
1119 * When the reset is complete, it writes 1
1120 * When the PF detects that the VF has recovered, it writes 2
1121 * VF checks this register periodically to determine if a reset has occurred,
1122 * then polls it to know when the reset is complete.
1123 * If either the PF or VF reads the register while the hardware
1124 * is in a reset state, it will return DEADBEEF, which, when masked
1125 * will result in 3.
1126 */
1127 enum virtchnl_vfr_states {
1128 VIRTCHNL_VFR_INPROGRESS = 0,
1129 VIRTCHNL_VFR_COMPLETED,
1130 VIRTCHNL_VFR_VFACTIVE,
1131 };
1132
1133 #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
1134 #define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
1135 #define PROTO_HDR_SHIFT 5
1136 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
1137 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
1138
1139 /* The VF uses these macros to configure each protocol header.
1140 * Specify which protocol headers and protocol header fields to use, based on
1141 * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
1142 * @param hdr: a struct of virtchnl_proto_hdr
1143 * @param hdr_type: ETH/IPV4/TCP, etc
1144 * @param field: SRC/DST/TEID/SPI, etc
1145 */
1146 #define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
1147 ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
1148 #define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
1149 ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
1150 #define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
1151 ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
1152 #define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
1153
1154 #define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1155 (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
1156 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1157 #define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1158 (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
1159 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1160
1161 #define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
1162 ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
1163 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
1164 (((hdr)->type) >> PROTO_HDR_SHIFT)
1165 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
1166 ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
1167 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
1168 (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
1169 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
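
/* A sketch of how a VF might use these macros, e.g. to select the IPv4
 * source and destination addresses of a header for an RSS or FDIR rule
 * (proto_hdrs refers to a struct virtchnl_proto_hdrs, defined below):
 *
 *	struct virtchnl_proto_hdr *hdr = &proto_hdrs.proto_hdr[0];
 *
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 *
 * This sets hdr->type to VIRTCHNL_PROTO_HDR_IPV4 and ORs the bits for
 * VIRTCHNL_PROTO_HDR_IPV4_SRC and VIRTCHNL_PROTO_HDR_IPV4_DST into
 * hdr->field_selector.
 */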
1170
1171 /* Protocol header type within a packet segment. A segment consists of one or
1172 * more protocol headers that make up a logical group of protocol headers. Each
1173 * logical group of protocol headers encapsulates or is encapsulated using/by
1174 * tunneling or encapsulation protocols for network virtualization.
1175 */
1176 enum virtchnl_proto_hdr_type {
1177 VIRTCHNL_PROTO_HDR_NONE,
1178 VIRTCHNL_PROTO_HDR_ETH,
1179 VIRTCHNL_PROTO_HDR_S_VLAN,
1180 VIRTCHNL_PROTO_HDR_C_VLAN,
1181 VIRTCHNL_PROTO_HDR_IPV4,
1182 VIRTCHNL_PROTO_HDR_IPV6,
1183 VIRTCHNL_PROTO_HDR_TCP,
1184 VIRTCHNL_PROTO_HDR_UDP,
1185 VIRTCHNL_PROTO_HDR_SCTP,
1186 VIRTCHNL_PROTO_HDR_GTPU_IP,
1187 VIRTCHNL_PROTO_HDR_GTPU_EH,
1188 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
1189 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
1190 VIRTCHNL_PROTO_HDR_PPPOE,
1191 VIRTCHNL_PROTO_HDR_L2TPV3,
1192 VIRTCHNL_PROTO_HDR_ESP,
1193 VIRTCHNL_PROTO_HDR_AH,
1194 VIRTCHNL_PROTO_HDR_PFCP,
1195 };
1196
1197 /* Protocol header field within a protocol header. */
1198 enum virtchnl_proto_hdr_field {
1199 /* ETHER */
1200 VIRTCHNL_PROTO_HDR_ETH_SRC =
1201 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
1202 VIRTCHNL_PROTO_HDR_ETH_DST,
1203 VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
1204 /* S-VLAN */
1205 VIRTCHNL_PROTO_HDR_S_VLAN_ID =
1206 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
1207 /* C-VLAN */
1208 VIRTCHNL_PROTO_HDR_C_VLAN_ID =
1209 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
1210 /* IPV4 */
1211 VIRTCHNL_PROTO_HDR_IPV4_SRC =
1212 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
1213 VIRTCHNL_PROTO_HDR_IPV4_DST,
1214 VIRTCHNL_PROTO_HDR_IPV4_DSCP,
1215 VIRTCHNL_PROTO_HDR_IPV4_TTL,
1216 VIRTCHNL_PROTO_HDR_IPV4_PROT,
1217 /* IPV6 */
1218 VIRTCHNL_PROTO_HDR_IPV6_SRC =
1219 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
1220 VIRTCHNL_PROTO_HDR_IPV6_DST,
1221 VIRTCHNL_PROTO_HDR_IPV6_TC,
1222 VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
1223 VIRTCHNL_PROTO_HDR_IPV6_PROT,
1224 /* TCP */
1225 VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
1226 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
1227 VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
1228 /* UDP */
1229 VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
1230 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
1231 VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
1232 /* SCTP */
1233 VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
1234 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
1235 VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
1236 /* GTPU_IP */
1237 VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
1238 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
1239 /* GTPU_EH */
1240 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
1241 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
1242 VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
1243 /* PPPOE */
1244 VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
1245 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
1246 /* L2TPV3 */
1247 VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
1248 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
1249 /* ESP */
1250 VIRTCHNL_PROTO_HDR_ESP_SPI =
1251 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
1252 /* AH */
1253 VIRTCHNL_PROTO_HDR_AH_SPI =
1254 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
1255 /* PFCP */
1256 VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
1257 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
1258 VIRTCHNL_PROTO_HDR_PFCP_SEID,
1259 };
1260
1261 struct virtchnl_proto_hdr {
1262 /* see enum virtchnl_proto_hdr_type */
1263 s32 type;
1264 u32 field_selector; /* a bit mask to select field for header type */
1265 u8 buffer[64];
1266 /**
1267 * binary buffer in network order for specific header type.
1268 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4
1269 * header is expected to be copied into the buffer.
1270 */
1271 };
1272
1273 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
1274
1275 struct virtchnl_proto_hdrs {
1276 u8 tunnel_level;
1277 u8 pad[3];
1278 /**
1279 * specify where the protocol headers start from.
1280 * must be 0 when sending a raw packet request.
1281 * 0 - from the outer layer
1282 * 1 - from the first inner layer
1283 * 2 - from the second inner layer
1284 * ....
1285 **/
1286 int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
1287 union {
1288 struct virtchnl_proto_hdr
1289 proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
1290 struct {
1291 u16 pkt_len;
1292 u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1293 u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1294 } raw;
1295 };
1296 };
1297
1298 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1299
1300 struct virtchnl_rss_cfg {
1301 struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
1302
1303 /* see enum virtchnl_rss_algorithm; rss algorithm type */
1304 s32 rss_algorithm;
1305 u8 reserved[128]; /* reserve for future */
1306 };
1307
1308 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
1309
1310 /* action configuration for FDIR */
1311 struct virtchnl_filter_action {
1312 /* see enum virtchnl_action type */
1313 s32 type;
1314 union {
1315 /* used for queue and qgroup action */
1316 struct {
1317 u16 index;
1318 u8 region;
1319 } queue;
1320 /* used for count action */
1321 struct {
1322 /* share counter ID with other flow rules */
1323 u8 shared;
1324 u32 id; /* counter ID */
1325 } count;
1326 /* used for mark action */
1327 u32 mark_id;
1328 u8 reserve[32];
1329 } act_conf;
1330 };
1331
1332 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1333
1334 #define VIRTCHNL_MAX_NUM_ACTIONS 8
1335
1336 struct virtchnl_filter_action_set {
1337 /* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1338 int count;
1339 struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1340 };
1341
1342 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1343
1344 /* pattern and action for FDIR rule */
1345 struct virtchnl_fdir_rule {
1346 struct virtchnl_proto_hdrs proto_hdrs;
1347 struct virtchnl_filter_action_set action_set;
1348 };
1349
1350 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1351
1352 /* Status returned to the VF after the VF requests FDIR commands
1353 * VIRTCHNL_FDIR_SUCCESS
1354 * The VF FDIR related request was completed successfully by the PF.
1355 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
1356 *
1357 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
1358 * OP_ADD_FDIR_FILTER request failed because no hardware resource is available.
1359 *
1360 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
1361 * OP_ADD_FDIR_FILTER request failed because the rule already exists.
1362 *
1363 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
1364 * OP_ADD_FDIR_FILTER request failed because it conflicts with an existing rule.
1365 *
1366 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
1367 * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
1368 *
1369 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
1370 * OP_ADD_FDIR_FILTER request failed because parameter validation failed
1371 * or the hardware doesn't support the rule.
1372 *
1373 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
1374 * OP_ADD/DEL_FDIR_FILTER request failed because rule programming
1375 * timed out.
1376 *
1377 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
1378 * OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
1379 * for example, the VF queried the counter of a rule that has no counter action.
1380 */
1381 enum virtchnl_fdir_prgm_status {
1382 VIRTCHNL_FDIR_SUCCESS = 0,
1383 VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1384 VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1385 VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1386 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1387 VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1388 VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1389 VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1390 };
1391
1392 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1393 * VF sends this request to PF by filling out vsi_id,
1394 * validate_only and rule_cfg. PF will return flow_id
1395 * if the request is successfully done and return add_status to VF.
1396 */
1397 struct virtchnl_fdir_add {
1398 u16 vsi_id; /* INPUT */
1399 /*
1400 * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
1401 * Validation and creation share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
1402 */
1403 u16 validate_only; /* INPUT */
1404 u32 flow_id; /* OUTPUT */
1405 struct virtchnl_fdir_rule rule_cfg; /* INPUT */
1406
1407 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1408 s32 status;
1409 };
1410
1411 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
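
/* A sketch of adding an FDIR rule (the transport and rule contents are left
 * out; see virtchnl_proto_hdrs and virtchnl_filter_action_set above):
 *
 *	struct virtchnl_fdir_add vfa = {};
 *
 *	vfa.vsi_id = vsi_id;
 *	vfa.validate_only = 0;		// 1 would only validate the rule
 *	// fill vfa.rule_cfg.proto_hdrs and vfa.rule_cfg.action_set here
 *	// send VIRTCHNL_OP_ADD_FDIR_FILTER with &vfa, sizeof(vfa)
 *
 * On success the PF sets status to VIRTCHNL_FDIR_SUCCESS in its reply and
 * returns the assigned flow_id, which the VF later places in
 * virtchnl_fdir_del to remove the rule.
 */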
1412
1413 /* VIRTCHNL_OP_DEL_FDIR_FILTER
1414 * VF sends this request to PF by filling out vsi_id
1415 * and flow_id. PF will return del_status to VF.
1416 */
1417 struct virtchnl_fdir_del {
1418 u16 vsi_id; /* INPUT */
1419 u16 pad;
1420 u32 flow_id; /* INPUT */
1421
1422 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1423 s32 status;
1424 };
1425
1426 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
1427
1428 struct virtchnl_shaper_bw {
1429 /* Unit is Kbps */
1430 u32 committed;
1431 u32 peak;
1432 };
1433
1434 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
1435
1436 /* VIRTCHNL_OP_GET_QOS_CAPS
1437 * VF sends this message to get its QoS Caps, such as
1438 * TC number, Arbiter and Bandwidth.
1439 */
1440 struct virtchnl_qos_cap_elem {
1441 u8 tc_num;
1442 u8 tc_prio;
1443 #define VIRTCHNL_ABITER_STRICT 0
1444 #define VIRTCHNL_ABITER_ETS 2
1445 u8 arbiter;
1446 #define VIRTCHNL_STRICT_WEIGHT 1
1447 u8 weight;
1448 enum virtchnl_bw_limit_type type;
1449 union {
1450 struct virtchnl_shaper_bw shaper;
1451 u8 pad2[32];
1452 };
1453 };
1454
1455 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
1456
1457 struct virtchnl_qos_cap_list {
1458 u16 vsi_id;
1459 u16 num_elem;
1460 struct virtchnl_qos_cap_elem cap[];
1461 };
1462
1463 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_qos_cap_list);
1464 #define virtchnl_qos_cap_list_LEGACY_SIZEOF 44
1465
1466 /* VIRTCHNL_OP_CONFIG_QUEUE_BW */
1467 struct virtchnl_queue_bw {
1468 u16 queue_id;
1469 u8 tc;
1470 u8 pad;
1471 struct virtchnl_shaper_bw shaper;
1472 };
1473
1474 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
1475
1476 struct virtchnl_queues_bw_cfg {
1477 u16 vsi_id;
1478 u16 num_queues;
1479 struct virtchnl_queue_bw cfg[];
1480 };
1481
1482 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queues_bw_cfg);
1483 #define virtchnl_queues_bw_cfg_LEGACY_SIZEOF 16
1484
1485 enum virtchnl_queue_type {
1486 VIRTCHNL_QUEUE_TYPE_TX = 0,
1487 VIRTCHNL_QUEUE_TYPE_RX = 1,
1488 };
1489
1490 /* structure to specify a chunk of contiguous queues */
1491 struct virtchnl_queue_chunk {
1492 /* see enum virtchnl_queue_type */
1493 s32 type;
1494 u16 start_queue_id;
1495 u16 num_queues;
1496 };
1497
1498 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
1499
1500 struct virtchnl_quanta_cfg {
1501 u16 quanta_size;
1502 u16 pad;
1503 struct virtchnl_queue_chunk queue_select;
1504 };
1505
1506 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);

/* Message-size helpers for structures that replaced a fixed one-element
 * trailing array with a C99 flexible array member. Each variant keeps the
 * result wire-compatible with what the original fixed-size definition
 * (@old, the *_LEGACY_SIZEOF value) used to produce.
 */

/* legacy sizeof counted a single byte of the array; add (count - 1) bytes */
#define __vss_byone(p, member, count, old)				      \
	(struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))

/* legacy sizeof counted one array element; add (count - 1) elements */
#define __vss_byelem(p, member, count, old)				      \
	(struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))

/* legacy sizeof is used as-is; add count full elements on top of it */
#define __vss_full(p, member, count, old)				      \
	(struct_size(p, member, count) + (old - struct_size(p, member, 0)))

#define __vss(type, func, p, member, count)				      \
	struct type: func(p, member, count, type##_LEGACY_SIZEOF)

#define virtchnl_struct_size(p, m, c)					      \
	_Generic(*p,							      \
		 __vss(virtchnl_vf_resource, __vss_full, p, m, c),	      \
		 __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c),  \
		 __vss(virtchnl_irq_map_info, __vss_full, p, m, c),	      \
		 __vss(virtchnl_ether_addr_list, __vss_full, p, m, c),	      \
		 __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c),	      \
		 __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c),  \
		 __vss(virtchnl_tc_info, __vss_byelem, p, m, c),	      \
		 __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c),     \
		 __vss(virtchnl_qos_cap_list, __vss_byelem, p, m, c),	      \
		 __vss(virtchnl_queues_bw_cfg, __vss_byelem, p, m, c),	      \
		 __vss(virtchnl_rss_key, __vss_byone, p, m, c),		      \
		 __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
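
/* Illustrative sketch only: virtchnl_struct_size() picks the right sizing
 * rule for the message type via _Generic and returns the number of bytes a
 * variable-length message needs while staying compatible with the legacy
 * fixed-size layout. The helper below is an assumption made for the
 * example, not existing API.
 */
static inline size_t
virtchnl_example_vlan_msg_len(struct virtchnl_vlan_filter_list *vfl)
{
	/* one u16 VLAN ID per element, never smaller than the legacy size */
	return virtchnl_struct_size(vfl, vlan_id, vfl->num_elements);
}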

/**
 * virtchnl_vc_validate_vf_msg
 * @ver: Virtchnl version info
 * @v_opcode: Opcode for the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Validate msg format against struct for each opcode.
 *
 * Return: 0 if the message length and layout are valid for the opcode,
 * otherwise VIRTCHNL_STATUS_ERR_PARAM or VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH.
 */
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
			    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	u32 valid_len = 0;

	/* Validate message length. */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct virtchnl_version_info);
		break;
	case VIRTCHNL_OP_RESET_VF:
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		if (VF_IS_V11(ver))
			valid_len = sizeof(u32);
		break;
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct virtchnl_txq_info);
		break;
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct virtchnl_rxq_info);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_vsi_queue_config_info *vqc =
			    (struct virtchnl_vsi_queue_config_info *)msg;
			valid_len = virtchnl_struct_size(vqc, qpair,
							 vqc->num_queue_pairs);
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_irq_map_info *vimi =
			    (struct virtchnl_irq_map_info *)msg;
			valid_len = virtchnl_struct_size(vimi, vecmap,
							 vimi->num_vectors);
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
	case VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_ether_addr_list *veal =
			    (struct virtchnl_ether_addr_list *)msg;
			valid_len = virtchnl_struct_size(veal, list,
							 veal->num_elements);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_ADD_VLAN:
	case VIRTCHNL_OP_DEL_VLAN:
		valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_vlan_filter_list *vfl =
			    (struct virtchnl_vlan_filter_list *)msg;
			valid_len = virtchnl_struct_size(vfl, vlan_id,
							 vfl->num_elements);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct virtchnl_promisc_info);
		break;
	case VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_RDMA:
		/* These messages are opaque to us and will be validated in
		 * the RDMA client code. We just need to check for nonzero
		 * length. The firmware will enforce max length restrictions.
		 */
		if (msglen)
			valid_len = msglen;
		else
			err_msg_format = true;
		break;
	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
		break;
	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
		valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_rdma_qvlist_info *qv =
				(struct virtchnl_rdma_qvlist_info *)msg;

			valid_len = virtchnl_struct_size(qv, qv_info,
							 qv->num_vectors);
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_rss_key *vrk =
				(struct virtchnl_rss_key *)msg;
			valid_len = virtchnl_struct_size(vrk, key,
							 vrk->key_len);
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_rss_lut *vrl =
				(struct virtchnl_rss_lut *)msg;
			valid_len = virtchnl_struct_size(vrl, lut,
							 vrl->lut_entries);
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
		valid_len = sizeof(struct virtchnl_rss_hfunc);
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		valid_len = sizeof(struct virtchnl_rss_hena);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		valid_len = sizeof(struct virtchnl_vf_res_request);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_tc_info *vti =
				(struct virtchnl_tc_info *)msg;
			valid_len = virtchnl_struct_size(vti, list,
							 vti->num_tc);
			if (vti->num_tc == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		valid_len = sizeof(struct virtchnl_filter);
		break;
	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG:
	case VIRTCHNL_OP_DEL_RSS_CFG:
		valid_len = sizeof(struct virtchnl_rss_cfg);
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER:
		valid_len = sizeof(struct virtchnl_fdir_add);
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER:
		valid_len = sizeof(struct virtchnl_fdir_del);
		break;
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
		break;
	case VIRTCHNL_OP_ADD_VLAN_V2:
	case VIRTCHNL_OP_DEL_VLAN_V2:
		valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_vlan_filter_list_v2 *vfl =
				(struct virtchnl_vlan_filter_list_v2 *)msg;

			valid_len = virtchnl_struct_size(vfl, filters,
							 vfl->num_elements);

			if (vfl->num_elements == 0) {
				err_msg_format = true;
				break;
			}
		}
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		valid_len = sizeof(struct virtchnl_vlan_setting);
		break;
	case VIRTCHNL_OP_GET_QOS_CAPS:
		break;
	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
		valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF;
		if (msglen >= valid_len) {
			struct virtchnl_queues_bw_cfg *q_bw =
				(struct virtchnl_queues_bw_cfg *)msg;

			valid_len = virtchnl_struct_size(q_bw, cfg,
							 q_bw->num_queues);
			if (q_bw->num_queues == 0) {
				err_msg_format = true;
				break;
			}
		}
		break;
	case VIRTCHNL_OP_CONFIG_QUANTA:
		valid_len = sizeof(struct virtchnl_quanta_cfg);
		if (msglen >= valid_len) {
			struct virtchnl_quanta_cfg *q_quanta =
				(struct virtchnl_quanta_cfg *)msg;

			if (q_quanta->quanta_size == 0 ||
			    q_quanta->queue_select.num_queues == 0) {
				err_msg_format = true;
				break;
			}
		}
		break;
	/* These are always errors coming from the VF. */
	case VIRTCHNL_OP_EVENT:
	case VIRTCHNL_OP_UNKNOWN:
	default:
		return VIRTCHNL_STATUS_ERR_PARAM;
	}
	/* Reject the message if the format is invalid or if the computed
	 * length does not match the length that was actually sent.
	 */
	if (err_msg_format || valid_len != msglen)
		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;

	return 0;
}
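
/* Illustrative sketch only: a PF-side receive path would typically run
 * every VF message through the validator above before acting on it. The
 * helper name is an assumption made for the example.
 */
static inline int
virtchnl_example_handle_vf_msg(struct virtchnl_version_info *ver,
			       u32 v_opcode, u8 *msg, u16 msglen)
{
	int err = virtchnl_vc_validate_vf_msg(ver, v_opcode, msg, msglen);

	/* malformed buffers are rejected before the opcode is dispatched */
	if (err)
		return err;

	/* ... dispatch on v_opcode and send v_retval back to the VF ... */
	return 0;
}
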
#endif /* _VIRTCHNL_H_ */