1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2013-2022, Intel Corporation. */
3
4 #ifndef _VIRTCHNL_H_
5 #define _VIRTCHNL_H_
6
7 #include <linux/bitops.h>
8 #include <linux/bits.h>
9 #include <linux/overflow.h>
10 #include <uapi/linux/if_ether.h>
11
12 /* Description:
13 * This header file describes the Virtual Function (VF) - Physical Function
14 * (PF) communication protocol used by the drivers for all devices starting
15 * from our 40G product line
16 *
17 * Admin queue buffer usage:
18 * desc->opcode is always aqc_opc_send_msg_to_pf
19 * flags, retval, datalen, and data addr are all used normally.
20 * The Firmware copies the cookie fields when sending messages between the
21 * PF and VF, but uses all other fields internally. Due to this limitation,
22 * we must send all messages as "indirect", i.e. using an external buffer.
23 *
24 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
25 * three VSIs. All the queue indexes are relative to the VSI. Each VF can
26 * have a maximum of sixteen queues for all of its VSIs.
27 *
28 * The PF is required to return a status code in v_retval for all messages
29 * except RESET_VF, which does not require any response. The returned value
30 * is of virtchnl_status_code type, defined here.
31 *
32 * In general, VF driver initialization should roughly follow the order of
33 * these opcodes. The VF driver must first validate the API version of the
34 * PF driver, then request a reset, then get resources, then configure
35 * queues and interrupts. After these operations are complete, the VF
36 * driver may start its queues, optionally add MAC and VLAN filters, and
37 * process traffic.
38 */
39
40 /* START GENERIC DEFINES
41 * Need to ensure the following enums and defines hold the same meaning and
42 * value in current and future projects
43 */
44
45 /* Error Codes */
46 enum virtchnl_status_code {
47 VIRTCHNL_STATUS_SUCCESS = 0,
48 VIRTCHNL_STATUS_ERR_PARAM = -5,
49 VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
50 VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
51 VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
52 VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
53 VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
54 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
55 };
56
57 /* Backward compatibility */
58 #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
59 #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
60
61 #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
62 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
63 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
64 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
65 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
66 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
67 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
68 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
69
70 enum virtchnl_link_speed {
71 VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
72 VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
73 VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
74 VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
75 VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
76 VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
77 VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
78 VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
79 VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
80 };
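
/* Illustration only (not part of the virtchnl ABI; helper and parameter names
 * are local to this example): a minimal sketch of translating the legacy
 * link-speed bits above into Mbps, as a VF driver might do when
 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED has not been negotiated.
 */
static inline u32 virtchnl_example_speed_to_mbps(enum virtchnl_link_speed speed)
{
	switch (speed) {
	case VIRTCHNL_LINK_SPEED_100MB:		return 100;
	case VIRTCHNL_LINK_SPEED_1GB:		return 1000;
	case VIRTCHNL_LINK_SPEED_2_5GB:		return 2500;
	case VIRTCHNL_LINK_SPEED_5GB:		return 5000;
	case VIRTCHNL_LINK_SPEED_10GB:		return 10000;
	case VIRTCHNL_LINK_SPEED_20GB:		return 20000;
	case VIRTCHNL_LINK_SPEED_25GB:		return 25000;
	case VIRTCHNL_LINK_SPEED_40GB:		return 40000;
	default:				return 0;	/* unknown */
	}
}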
81
82 /* for hsplit_0 field of Rx HMC context */
83 /* deprecated with AVF 1.0 */
84 enum virtchnl_rx_hsplit {
85 VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
86 VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
87 VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
88 VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
89 VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
90 };
91
92 enum virtchnl_bw_limit_type {
93 VIRTCHNL_BW_SHAPER = 0,
94 };
95 /* END GENERIC DEFINES */
96
97 /* Opcodes for VF-PF communication. These are placed in the v_opcode field
98 * of the virtchnl_msg structure.
99 */
100 enum virtchnl_ops {
101 /* The PF sends status change events to VFs using
102 * the VIRTCHNL_OP_EVENT opcode.
103 * VFs send requests to the PF using the other ops.
104 * Use of "advanced opcode" features must be negotiated as part of the
105 * capabilities exchange and is not considered part of the base mode feature set.
106 */
107 VIRTCHNL_OP_UNKNOWN = 0,
108 VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
109 VIRTCHNL_OP_RESET_VF = 2,
110 VIRTCHNL_OP_GET_VF_RESOURCES = 3,
111 VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
112 VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
113 VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
114 VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
115 VIRTCHNL_OP_ENABLE_QUEUES = 8,
116 VIRTCHNL_OP_DISABLE_QUEUES = 9,
117 VIRTCHNL_OP_ADD_ETH_ADDR = 10,
118 VIRTCHNL_OP_DEL_ETH_ADDR = 11,
119 VIRTCHNL_OP_ADD_VLAN = 12,
120 VIRTCHNL_OP_DEL_VLAN = 13,
121 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
122 VIRTCHNL_OP_GET_STATS = 15,
123 VIRTCHNL_OP_RSVD = 16,
124 VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
125 VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
126 /* opcode 19 is reserved */
127 VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
128 VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
129 VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
130 VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
131 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
132 VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
133 VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
134 VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
135 VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS = 25,
136 VIRTCHNL_OP_SET_RSS_HASHCFG = 26,
137 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
138 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
139 VIRTCHNL_OP_REQUEST_QUEUES = 29,
140 VIRTCHNL_OP_ENABLE_CHANNELS = 30,
141 VIRTCHNL_OP_DISABLE_CHANNELS = 31,
142 VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
143 VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
144 /* opcode 34 - 43 are reserved */
145 VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
146 VIRTCHNL_OP_ADD_RSS_CFG = 45,
147 VIRTCHNL_OP_DEL_RSS_CFG = 46,
148 VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
149 VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
150 VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
151 VIRTCHNL_OP_ADD_VLAN_V2 = 52,
152 VIRTCHNL_OP_DEL_VLAN_V2 = 53,
153 VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
154 VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
155 VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
156 VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
157 /* opcode 58 and 59 are reserved */
158 VIRTCHNL_OP_1588_PTP_GET_CAPS = 60,
159 VIRTCHNL_OP_1588_PTP_GET_TIME = 61,
160 /* opcode 62 - 65 are reserved */
161 VIRTCHNL_OP_GET_QOS_CAPS = 66,
162 /* opcode 68 through 111 are reserved */
163 VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
164 VIRTCHNL_OP_CONFIG_QUANTA = 113,
165 VIRTCHNL_OP_MAX,
166 };
167
168 /* These macros are used to generate compilation errors if a structure/union
169 * is not exactly the correct length. It gives a divide by zero error if the
170 * structure/union is not of the correct size, otherwise it creates an enum
171 * that is never used.
172 */
173 #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
174 { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
175 #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
176 { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
177
178 /* Message descriptions and data structures. */
179
180 /* VIRTCHNL_OP_VERSION
181 * VF posts its version number to the PF. PF responds with its version number
182 * in the same format, along with a return code.
183 * Reply from PF has its major/minor versions also in param0 and param1.
184 * If there is a major version mismatch, then the VF cannot operate.
185 * If there is a minor version mismatch, then the VF can operate but should
186 * add a warning to the system log.
187 *
188 * This enum element MUST always be specified as == 1, regardless of other
189 * changes in the API. The PF must always respond to this message without
190 * error regardless of version mismatch.
191 */
192 #define VIRTCHNL_VERSION_MAJOR 1
193 #define VIRTCHNL_VERSION_MINOR 1
194 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
195
196 struct virtchnl_version_info {
197 u32 major;
198 u32 minor;
199 };
200
201 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
202
203 #define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))
204 #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
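
/* Illustration only (not part of the virtchnl ABI; the helper name is local to
 * this example): a minimal sketch of the version check described above.
 * Returns false on a major-version mismatch, in which case the VF cannot
 * operate; on a minor mismatch the VF can still operate but should log a
 * warning, which is left to the caller.
 */
static inline bool virtchnl_example_pf_version_usable(const struct virtchnl_version_info *pf_ver)
{
	return pf_ver->major == VIRTCHNL_VERSION_MAJOR;
}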
205
206 /* VIRTCHNL_OP_RESET_VF
207 * VF sends this request to PF with no parameters
208 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
209 * until reset completion is indicated. The admin queue must be reinitialized
210 * after this operation.
211 *
212 * When reset is complete, PF must ensure that all queues in all VSIs associated
213 * with the VF are stopped, all queue configurations in the HMC are set to 0,
214 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
215 * are cleared.
216 */
217
218 /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
219 * vsi_type should always be 6 for backward compatibility. Add other fields
220 * as needed.
221 */
222 enum virtchnl_vsi_type {
223 VIRTCHNL_VSI_TYPE_INVALID = 0,
224 VIRTCHNL_VSI_SRIOV = 6,
225 };
226
227 /* VIRTCHNL_OP_GET_VF_RESOURCES
228 * Version 1.0 VF sends this request to PF with no parameters
229 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
230 * PF responds with an indirect message containing
231 * virtchnl_vf_resource and one or more
232 * virtchnl_vsi_resource structures.
233 */
234
235 struct virtchnl_vsi_resource {
236 u16 vsi_id;
237 u16 num_queue_pairs;
238
239 /* see enum virtchnl_vsi_type */
240 s32 vsi_type;
241 u16 qset_handle;
242 u8 default_mac_addr[ETH_ALEN];
243 };
244
245 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
246
247 /* VF capability flags
248 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
249 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
250 */
251 #define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
252 #define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
253 #define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
254 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
255 #define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
256 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
257 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
258 /* used to negotiate communicating link speeds in Mbps */
259 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
260 #define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
261 #define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11)
262 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
263 #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
264 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
265 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
266 #define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
267 #define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
268 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
269 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
270 #define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
271 #define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
272 #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
273 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
274 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
275 #define VIRTCHNL_VF_OFFLOAD_QOS BIT(29)
276 #define VIRTCHNL_VF_CAP_PTP BIT(31)
277
278 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
279 VIRTCHNL_VF_OFFLOAD_VLAN | \
280 VIRTCHNL_VF_OFFLOAD_RSS_PF)
281
282 struct virtchnl_vf_resource {
283 u16 num_vsis;
284 u16 num_queue_pairs;
285 u16 max_vectors;
286 u16 max_mtu;
287
288 u32 vf_cap_flags;
289 u32 rss_key_size;
290 u32 rss_lut_size;
291
292 struct virtchnl_vsi_resource vsi_res[];
293 };
294
295 VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
296 #define virtchnl_vf_resource_LEGACY_SIZEOF 36
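
/* Illustration only (not part of the virtchnl ABI; names are local to this
 * example): vsi_res[] is a flexible array, so the VIRTCHNL_OP_GET_VF_RESOURCES
 * reply is sized for num_vsis entries. A minimal sketch of validating the
 * reply length, where msglen is the buffer length reported by the mailbox. A
 * real driver would also check msglen >= sizeof(*res) before trusting
 * num_vsis.
 */
static inline bool virtchnl_example_vf_res_len_ok(const struct virtchnl_vf_resource *res,
						  size_t msglen)
{
	return msglen >= struct_size(res, vsi_res, res->num_vsis);
}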
297
298 /* VIRTCHNL_OP_CONFIG_TX_QUEUE
299 * VF sends this message to set up parameters for one TX queue.
300 * External data buffer contains one instance of virtchnl_txq_info.
301 * PF configures requested queue and returns a status code.
302 */
303
304 /* Tx queue config info */
305 struct virtchnl_txq_info {
306 u16 vsi_id;
307 u16 queue_id;
308 u16 ring_len; /* number of descriptors, multiple of 8 */
309 u16 headwb_enabled; /* deprecated with AVF 1.0 */
310 u64 dma_ring_addr;
311 u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
312 };
313
314 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
315
316 /* RX descriptor IDs (range from 0 to 63) */
317 enum virtchnl_rx_desc_ids {
318 VIRTCHNL_RXDID_0_16B_BASE = 0,
319 VIRTCHNL_RXDID_1_32B_BASE = 1,
320 VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
321 VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
322 VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
323 VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
324 VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
325 VIRTCHNL_RXDID_7_HW_RSVD = 7,
326 /* 8 through 15 are reserved */
327 VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
328 VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
329 VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
330 VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 = 19,
331 VIRTCHNL_RXDID_20_COMMS_AUX_FLOW = 20,
332 VIRTCHNL_RXDID_21_COMMS_AUX_TCP = 21,
333 /* 22 through 63 are reserved */
334 };
335
336 #define VIRTCHNL_RXDID_BIT(x) BIT_ULL(VIRTCHNL_RXDID_##x)
337
338 /* RX descriptor ID bitmasks */
339 enum virtchnl_rx_desc_id_bitmasks {
340 VIRTCHNL_RXDID_0_16B_BASE_M = VIRTCHNL_RXDID_BIT(0_16B_BASE),
341 VIRTCHNL_RXDID_1_32B_BASE_M = VIRTCHNL_RXDID_BIT(1_32B_BASE),
342 VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL_RXDID_BIT(2_FLEX_SQ_NIC),
343 VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = VIRTCHNL_RXDID_BIT(3_FLEX_SQ_SW),
344 VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = VIRTCHNL_RXDID_BIT(4_FLEX_SQ_NIC_VEB),
345 VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M = VIRTCHNL_RXDID_BIT(5_FLEX_SQ_NIC_ACL),
346 VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M = VIRTCHNL_RXDID_BIT(6_FLEX_SQ_NIC_2),
347 VIRTCHNL_RXDID_7_HW_RSVD_M = VIRTCHNL_RXDID_BIT(7_HW_RSVD),
348 /* 8 through 15 are reserved */
349 VIRTCHNL_RXDID_16_COMMS_GENERIC_M = VIRTCHNL_RXDID_BIT(16_COMMS_GENERIC),
350 VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M = VIRTCHNL_RXDID_BIT(17_COMMS_AUX_VLAN),
351 VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M = VIRTCHNL_RXDID_BIT(18_COMMS_AUX_IPV4),
352 VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M = VIRTCHNL_RXDID_BIT(19_COMMS_AUX_IPV6),
353 VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M = VIRTCHNL_RXDID_BIT(20_COMMS_AUX_FLOW),
354 VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M = VIRTCHNL_RXDID_BIT(21_COMMS_AUX_TCP),
355 /* 22 through 63 are reserved */
356 };
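
/* Illustration only (not part of the virtchnl ABI; the helper name is local to
 * this example): the reply to VIRTCHNL_OP_GET_SUPPORTED_RXDIDS carries a
 * bitmap built from the masks above (one bit per RXDID). A minimal sketch of
 * testing whether the flexible NIC layout may be requested in
 * virtchnl_rxq_info.rxdid.
 */
static inline bool virtchnl_example_flex_nic_supported(u64 supported_rxdids)
{
	return (supported_rxdids & VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M) != 0;
}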
357
358 /* virtchnl_rxq_info_flags - definition of bits in the flags field of the
359 * virtchnl_rxq_info structure.
360 *
361 * @VIRTCHNL_PTP_RX_TSTAMP: request to enable Rx timestamping
362 *
363 * Other flag bits are currently reserved and they may be extended in the
364 * future.
365 */
366 enum virtchnl_rxq_info_flags {
367 VIRTCHNL_PTP_RX_TSTAMP = BIT(0),
368 };
369
370 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
371 * VF sends this message to set up parameters for one RX queue.
372 * External data buffer contains one instance of virtchnl_rxq_info.
373 * PF configures requested queue and returns a status code. The
374 * crc_disable flag disables CRC stripping on the VF. Setting
375 * the crc_disable flag to 1 will disable CRC stripping for each
376 * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
377 * offload must have been set prior to sending this info or the PF
378 * will ignore the request. This flag should be set the same for
379 * all of the queues for a VF.
380 */
381
382 /* Rx queue config info */
383 struct virtchnl_rxq_info {
384 u16 vsi_id;
385 u16 queue_id;
386 u32 ring_len; /* number of descriptors, multiple of 32 */
387 u16 hdr_size;
388 u16 splithdr_enabled; /* deprecated with AVF 1.0 */
389 u32 databuffer_size;
390 u32 max_pkt_size;
391 u8 crc_disable;
392 /* see enum virtchnl_rx_desc_ids;
393 * only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported. Note
394 * that when the offload is not supported, the descriptor format aligns
395 * with VIRTCHNL_RXDID_1_32B_BASE.
396 */
397 enum virtchnl_rx_desc_ids rxdid:8;
398 enum virtchnl_rxq_info_flags flags:8; /* see virtchnl_rxq_info_flags */
399 u8 pad1;
400 u64 dma_ring_addr;
401
402 /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
403 s32 rx_split_pos;
404 u32 pad2;
405 };
406
407 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
408
409 /* VIRTCHNL_OP_CONFIG_VSI_QUEUES
410 * VF sends this message to set parameters for all active TX and RX queues
411 * associated with the specified VSI.
412 * PF configures queues and returns status.
413 * If the number of queues specified is greater than the number of queues
414 * associated with the VSI, an error is returned and no queues are configured.
415 * NOTE: The VF is not required to configure all queues in a single request.
416 * It may send multiple messages. PF drivers must correctly handle all VF
417 * requests.
418 */
419 struct virtchnl_queue_pair_info {
420 /* NOTE: vsi_id and queue_id should be identical for both queues. */
421 struct virtchnl_txq_info txq;
422 struct virtchnl_rxq_info rxq;
423 };
424
425 VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
426
427 struct virtchnl_vsi_queue_config_info {
428 u16 vsi_id;
429 u16 num_queue_pairs;
430 u32 pad;
431 struct virtchnl_queue_pair_info qpair[];
432 };
433
434 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
435 #define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72
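
/* Illustration only (not part of the virtchnl ABI; helper and parameter names
 * are local to this example): a minimal sketch of preparing one queue pair for
 * VIRTCHNL_OP_CONFIG_VSI_QUEUES. The caller is assumed to have zeroed a buffer
 * of struct_size(info, qpair, n) bytes, set info->vsi_id, and to fill any
 * remaining ring parameters it needs.
 */
static inline void virtchnl_example_fill_qpair(struct virtchnl_vsi_queue_config_info *info,
					       u16 idx, u16 queue_id,
					       u64 tx_ring, u16 tx_len,
					       u64 rx_ring, u32 rx_len)
{
	struct virtchnl_queue_pair_info *qpi = &info->qpair[idx];

	qpi->txq.vsi_id = info->vsi_id;
	qpi->txq.queue_id = queue_id;
	qpi->txq.ring_len = tx_len;		/* multiple of 8 */
	qpi->txq.dma_ring_addr = tx_ring;

	qpi->rxq.vsi_id = info->vsi_id;
	qpi->rxq.queue_id = queue_id;		/* must match txq.queue_id */
	qpi->rxq.ring_len = rx_len;		/* multiple of 32 */
	qpi->rxq.dma_ring_addr = rx_ring;
}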
436
437 /* VIRTCHNL_OP_REQUEST_QUEUES
438 * VF sends this message to request the PF to allocate additional queues to
439 * this VF. Each VF gets a guaranteed number of queues on init but asking for
440 * additional queues must be negotiated. This is a best effort request as it
441 * is possible the PF does not have enough queues left to support the request.
442 * If the PF cannot support the number requested it will respond with the
443 * maximum number it is able to support. If the request is successful, PF will
444 * then reset the VF to institute required changes.
445 */
446
447 /* VF resource request */
448 struct virtchnl_vf_res_request {
449 u16 num_queue_pairs;
450 };
451
452 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
453 * VF uses this message to map vectors to queues.
454 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
455 * are to be associated with the specified vector.
456 * The "other" causes are always mapped to vector 0. The VF may not request
457 * that vector 0 be used for traffic.
458 * PF configures interrupt mapping and returns status.
459 * NOTE: due to hardware requirements, all active queues (both TX and RX)
460 * should be mapped to interrupts, even if the driver intends to operate
461 * only in polling mode. In this case the interrupt may be disabled, but
462 * the ITR timer will still run to trigger writebacks.
463 */
464 struct virtchnl_vector_map {
465 u16 vsi_id;
466 u16 vector_id;
467 u16 rxq_map;
468 u16 txq_map;
469 u16 rxitr_idx;
470 u16 txitr_idx;
471 };
472
473 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
474
475 struct virtchnl_irq_map_info {
476 u16 num_vectors;
477 struct virtchnl_vector_map vecmap[];
478 };
479
480 VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
481 #define virtchnl_irq_map_info_LEGACY_SIZEOF 14
482
483 /* VIRTCHNL_OP_ENABLE_QUEUES
484 * VIRTCHNL_OP_DISABLE_QUEUES
485 * VF sends these message to enable or disable TX/RX queue pairs.
486 * The queues fields are bitmaps indicating which queues to act upon.
487 * (Currently, we only support 16 queues per VF, but we make the field
488 * u32 to allow for expansion.)
489 * PF performs requested action and returns status.
490 * NOTE: The VF is not required to enable/disable all queues in a single
491 * request. It may send multiple messages.
492 * PF drivers must correctly handle all VF requests.
493 */
494 struct virtchnl_queue_select {
495 u16 vsi_id;
496 u16 pad;
497 u32 rx_queues;
498 u32 tx_queues;
499 };
500
501 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
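
/* Illustration only (not part of the virtchnl ABI; names are local to this
 * example): a minimal sketch of selecting the first num_qp Tx/Rx queue pairs
 * for VIRTCHNL_OP_ENABLE_QUEUES / VIRTCHNL_OP_DISABLE_QUEUES. Assumes
 * num_qp < 32, which holds for the current 16-queue-per-VF limit.
 */
static inline void virtchnl_example_select_queues(struct virtchnl_queue_select *vqs,
						  u16 vsi_id, u32 num_qp)
{
	vqs->vsi_id = vsi_id;
	vqs->pad = 0;
	vqs->rx_queues = BIT(num_qp) - 1;	/* bitmap of queues 0..num_qp-1 */
	vqs->tx_queues = vqs->rx_queues;
}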
502
503 /* VIRTCHNL_OP_ADD_ETH_ADDR
504 * VF sends this message in order to add one or more unicast or multicast
505 * address filters for the specified VSI.
506 * PF adds the filters and returns status.
507 */
508
509 /* VIRTCHNL_OP_DEL_ETH_ADDR
510 * VF sends this message in order to remove one or more unicast or multicast
511 * filters for the specified VSI.
512 * PF removes the filters and returns status.
513 */
514
515 /* VIRTCHNL_ETHER_ADDR_LEGACY
516 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
517 * bytes. Moving forward all VF drivers should not set type to
518 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
519 * behavior. The control plane function (i.e. PF) can use a best effort method
520 * of tracking the primary/device unicast in this case, but there is no
521 * guarantee and functionality depends on the implementation of the PF.
522 */
523
524 /* VIRTCHNL_ETHER_ADDR_PRIMARY
525 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
526 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
527 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
528 * function (i.e. PF) to accurately track and use this MAC address for
529 * displaying on the host and for VM/function reset.
530 */
531
532 /* VIRTCHNL_ETHER_ADDR_EXTRA
533 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
534 * unicast and/or multicast filters that are being added/deleted via
535 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
536 */
537 struct virtchnl_ether_addr {
538 u8 addr[ETH_ALEN];
539 u8 type;
540 #define VIRTCHNL_ETHER_ADDR_LEGACY 0
541 #define VIRTCHNL_ETHER_ADDR_PRIMARY 1
542 #define VIRTCHNL_ETHER_ADDR_EXTRA 2
543 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
544 u8 pad;
545 };
546
547 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
548
549 struct virtchnl_ether_addr_list {
550 u16 vsi_id;
551 u16 num_elements;
552 struct virtchnl_ether_addr list[];
553 };
554
555 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
556 #define virtchnl_ether_addr_list_LEGACY_SIZEOF 12
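
/* Illustration only (not part of the virtchnl ABI; names are local to this
 * example): a minimal sketch of building a one-element
 * VIRTCHNL_OP_ADD_ETH_ADDR message that adds the primary/device unicast MAC
 * address. The caller is assumed to have zeroed a buffer of
 * struct_size(list, list, 1) bytes.
 */
static inline void virtchnl_example_add_primary_mac(struct virtchnl_ether_addr_list *list,
						    u16 vsi_id, const u8 *mac)
{
	int i;

	list->vsi_id = vsi_id;
	list->num_elements = 1;
	list->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
	for (i = 0; i < ETH_ALEN; i++)	/* open-coded copy to avoid extra includes */
		list->list[0].addr[i] = mac[i];
}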
557
558 /* VIRTCHNL_OP_ADD_VLAN
559 * VF sends this message to add one or more VLAN tag filters for receives.
560 * PF adds the filters and returns status.
561 * If a port VLAN is configured by the PF, this operation will return an
562 * error to the VF.
563 */
564
565 /* VIRTCHNL_OP_DEL_VLAN
566 * VF sends this message to remove one or more VLAN tag filters for receives.
567 * PF removes the filters and returns status.
568 * If a port VLAN is configured by the PF, this operation will return an
569 * error to the VF.
570 */
571
572 struct virtchnl_vlan_filter_list {
573 u16 vsi_id;
574 u16 num_elements;
575 u16 vlan_id[];
576 };
577
578 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
579 #define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6
580
581 /* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
582 * structures and opcodes.
583 *
584 * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
585 * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
586 *
587 * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
588 * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
589 * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
590 *
591 * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
592 * by the PF concurrently. For example, if the PF can support
593 * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
594 * would OR the following bits:
595 *
596 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
597 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
598 * VIRTCHNL_VLAN_ETHERTYPE_AND;
599 *
600 * The VF would interpret this as VLAN filtering can be supported on both 0x8100
601 * and 0x88A8 VLAN ethertypes.
602 *
603 * VIRTCHNL_ETHERTYPE_XOR - Used when only a single ethertype can be supported
604 * by the PF concurrently. For example if the PF can support
605 * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
606 * offload it would OR the following bits:
607 *
608 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
609 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
610 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
611 *
612 * The VF would interpret this as VLAN stripping can be supported on either
613 * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
614 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
615 * the previously set value.
616 *
617 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
618 * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
619 *
620 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
621 * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
622 *
623 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
624 * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
625 *
626 * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
627 * VLAN filtering if the underlying PF supports it.
628 *
629 * VIRTCHNL_VLAN_TOGGLE_ALLOWED - This field is used to say whether a
630 * certain VLAN capability can be toggled. For example if the underlying PF/CP
631 * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
632 * set this bit along with the supported ethertypes.
633 */
634 enum virtchnl_vlan_support {
635 VIRTCHNL_VLAN_UNSUPPORTED = 0,
636 VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
637 VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
638 VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
639 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
640 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
641 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
642 VIRTCHNL_VLAN_PRIO = BIT(24),
643 VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
644 VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
645 VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
646 VIRTCHNL_VLAN_TOGGLE = BIT(31),
647 };
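
/* Illustration only (not part of the virtchnl ABI; the helper name is local to
 * this example): a minimal sketch of testing one of the capability words the
 * PF populates with the bits above, e.g. whether the VF may toggle an 0x8100
 * offload as described for VIRTCHNL_VLAN_TOGGLE.
 */
static inline bool virtchnl_example_can_toggle_8100(u32 caps)
{
	return (caps & VIRTCHNL_VLAN_TOGGLE) &&
	       (caps & VIRTCHNL_VLAN_ETHERTYPE_8100);
}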
648
649 /* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
650 * for filtering, insertion, and stripping capabilities.
651 *
652 * If only outer capabilities are supported (for filtering, insertion, and/or
653 * stripping) then this refers to the outer most or single VLAN from the VF's
654 * perspective.
655 *
656 * If only inner capabilities are supported (for filtering, insertion, and/or
657 * stripping) then this refers to the outer most or single VLAN from the VF's
658 * perspective. Functionally this is the same as if only outer capabilities are
659 * supported. The VF driver is just forced to use the inner fields when
660 * adding/deleting filters and enabling/disabling offloads (if supported).
661 *
662 * If both outer and inner capabilities are supported (for filtering, insertion,
663 * and/or stripping) then outer refers to the outer most or single VLAN and
664 * inner refers to the second VLAN, if it exists, in the packet.
665 *
666 * There is no support for tunneled VLAN offloads, so outer or inner are never
667 * referring to a tunneled packet from the VF's perspective.
668 */
669 struct virtchnl_vlan_supported_caps {
670 u32 outer;
671 u32 inner;
672 };
673
674 /* The PF populates these fields based on the supported VLAN filtering. If a
675 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
676 * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
677 * the unsupported fields.
678 *
679 * Also, a VF is only allowed to toggle its VLAN filtering setting if the
680 * VIRTCHNL_VLAN_TOGGLE bit is set.
681 *
682 * The ethertype(s) specified in the ethertype_init field are the ethertypes
683 * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
684 * most VLAN from the VF's perspective. If both inner and outer filtering are
685 * allowed then ethertype_init only refers to the outer most VLAN, as the only
686 * VLAN ethertype supported for inner VLAN filtering is
687 * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
688 * when both inner and outer filtering are allowed.
689 *
690 * The max_filters field tells the VF how many VLAN filters it's allowed to have
691 * at any one time. If it exceeds this amount and tries to add another filter,
692 * then the request will be rejected by the PF. To prevent failures, the VF
693 * should keep track of how many VLAN filters it has added and not attempt to
694 * add more than max_filters.
695 */
696 struct virtchnl_vlan_filtering_caps {
697 struct virtchnl_vlan_supported_caps filtering_support;
698 u32 ethertype_init;
699 u16 max_filters;
700 u8 pad[2];
701 };
702
703 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
704
705 /* This enum is used for the virtchnl_vlan_offload_caps structure to specify
706 * if the PF supports a different ethertype for stripping and insertion.
707 *
708 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
709 * for stripping affect the ethertype(s) specified for insertion and vice versa
710 * as well. If the VF tries to configure VLAN stripping via
711 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
712 * that will be the ethertype for both stripping and insertion.
713 *
714 * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
715 * stripping do not affect the ethertype(s) specified for insertion and vice
716 * versa.
717 */
718 enum virtchnl_vlan_ethertype_match {
719 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
720 VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
721 };
722
723 /* The PF populates these fields based on the supported VLAN offloads. If a
724 * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
725 * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
726 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
727 *
728 * Also, a VF is only allowed to toggle its VLAN offload setting if the
729 * VIRTCHNL_VLAN_TOGGLE_ALLOWED bit is set.
730 *
731 * The VF driver needs to be aware of how the tags are stripped by hardware and
732 * inserted by the VF driver based on the level of offload support. The PF will
733 * populate these fields based on where the VLAN tags are expected to be
734 * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
735 * interpret these fields. See the definition of the
736 * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
737 * enumeration.
738 */
739 struct virtchnl_vlan_offload_caps {
740 struct virtchnl_vlan_supported_caps stripping_support;
741 struct virtchnl_vlan_supported_caps insertion_support;
742 u32 ethertype_init;
743 u8 ethertype_match;
744 u8 pad[3];
745 };
746
747 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
748
749 /* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
750 * VF sends this message to determine its VLAN capabilities.
751 *
752 * PF will mark which capabilities it supports based on hardware support and
753 * current configuration. For example, if a port VLAN is configured the PF will
754 * not allow outer VLAN filtering, stripping, or insertion to be configured so
755 * it will block these features from the VF.
756 *
757 * The VF will need to cross reference its capabilities with the PFs
758 * capabilities in the response message from the PF to determine the VLAN
759 * support.
760 */
761 struct virtchnl_vlan_caps {
762 struct virtchnl_vlan_filtering_caps filtering;
763 struct virtchnl_vlan_offload_caps offloads;
764 };
765
766 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
767
768 struct virtchnl_vlan {
769 u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
770 u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
771 * filtering caps
772 */
773 u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
774 * filtering caps. Note that tpid here does not refer to
775 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
776 * actual 2-byte VLAN TPID
777 */
778 u8 pad[2];
779 };
780
781 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
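
/* Illustration only (not part of the virtchnl ABI; the helper name is local to
 * this example): a minimal sketch of encoding a VLAN ID and priority into the
 * tci field laid out above (tci[15:13] = PCP, tci[11:0] = VID).
 */
static inline u16 virtchnl_example_make_tci(u16 vid, u8 prio)
{
	return (vid & 0xFFF) | ((u16)(prio & 0x7) << 13);
}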
782
783 struct virtchnl_vlan_filter {
784 struct virtchnl_vlan inner;
785 struct virtchnl_vlan outer;
786 u8 pad[16];
787 };
788
789 VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
790
791 /* VIRTCHNL_OP_ADD_VLAN_V2
792 * VIRTCHNL_OP_DEL_VLAN_V2
793 *
794 * VF sends these messages to add/del one or more VLAN tag filters for Rx
795 * traffic.
796 *
797 * The PF attempts to add the filters and returns status.
798 *
799 * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
800 * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
801 */
802 struct virtchnl_vlan_filter_list_v2 {
803 u16 vport_id;
804 u16 num_elements;
805 u8 pad[4];
806 struct virtchnl_vlan_filter filters[];
807 };
808
809 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
810 #define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
811
812 /* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
813 * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
814 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
815 * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
816 *
817 * VF sends this message to enable or disable VLAN stripping or insertion. It
818 * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
819 * allowed and whether or not it's allowed to enable/disable the specific
820 * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
821 * parse the virtchnl_vlan_caps.offloads fields to determine which offload
822 * messages are allowed.
823 *
824 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
825 * following manner the VF will be allowed to enable and/or disable 0x8100 inner
826 * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
827 * case means the outer most or single VLAN from the VF's perspective. This is
828 * because no outer offloads are supported. See the comments above the
829 * virtchnl_vlan_supported_caps structure for more details.
830 *
831 * virtchnl_vlan_caps.offloads.stripping_support.inner =
832 * VIRTCHNL_VLAN_TOGGLE |
833 * VIRTCHNL_VLAN_ETHERTYPE_8100;
834 *
835 * virtchnl_vlan_caps.offloads.insertion_support.inner =
836 * VIRTCHNL_VLAN_TOGGLE |
837 * VIRTCHNL_VLAN_ETHERTYPE_8100;
838 *
839 * In order to enable inner (again note that in this case inner is the outer
840 * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
841 * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
842 * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
843 *
844 * virtchnl_vlan_setting.inner_ethertype_setting =
845 * VIRTCHNL_VLAN_ETHERTYPE_8100;
846 *
847 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
848 * initialization.
849 *
850 * The reason that VLAN TPID(s) are not being used for the
851 * outer_ethertype_setting and inner_ethertype_setting fields is because it's
852 * possible a device could support VLAN insertion and/or stripping offload on
853 * multiple ethertypes concurrently, so this method allows a VF to request
854 * multiple ethertypes in one message using the virtchnl_vlan_support
855 * enumeration.
856 *
857 * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
858 * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
859 * VLAN insertion and stripping simultaneously. The
860 * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
861 * populated based on what the PF can support.
862 *
863 * virtchnl_vlan_caps.offloads.stripping_support.outer =
864 * VIRTCHNL_VLAN_TOGGLE |
865 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
866 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
867 * VIRTCHNL_VLAN_ETHERTYPE_AND;
868 *
869 * virtchnl_vlan_caps.offloads.insertion_support.outer =
870 * VIRTCHNL_VLAN_TOGGLE |
871 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
872 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
873 * VIRTCHNL_VLAN_ETHERTYPE_AND;
874 *
875 * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
876 * would populate the virtchnl_vlan_setting structure in the following manner
877 * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
878 *
879 * virtchnl_vlan_setting.outer_ethertype_setting =
880 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
881 * VIRTCHNL_VLAN_ETHERTYPE_88A8;
882 *
883 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
884 * initialization.
885 *
886 * There is also the case where a PF and the underlying hardware can support
887 * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
888 * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
889 * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
890 * offloads. The ethertypes must match for stripping and insertion.
891 *
892 * virtchnl_vlan_caps.offloads.stripping_support.outer =
893 * VIRTCHNL_VLAN_TOGGLE |
894 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
895 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
896 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
897 *
898 * virtchnl_vlan_caps.offloads.insertion_support.outer =
899 * VIRTCHNL_VLAN_TOGGLE |
900 * VIRTCHNL_VLAN_ETHERTYPE_8100 |
901 * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
902 * VIRTCHNL_VLAN_ETHERTYPE_XOR;
903 *
904 * virtchnl_vlan_caps.offloads.ethertype_match =
905 * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
906 *
907 * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
908 * populate the virtchnl_vlan_setting structure in the following manner and send
909 * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
910 * ethertype for VLAN insertion if it's enabled. So, for completeness, a
911 * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
912 *
913 * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
914 *
915 * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
916 * initialization.
917 */
918 struct virtchnl_vlan_setting {
919 u32 outer_ethertype_setting;
920 u32 inner_ethertype_setting;
921 u16 vport_id;
922 u8 pad[6];
923 };
924
925 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
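
/* Illustration only (not part of the virtchnl ABI; the helper name is local to
 * this example): a minimal sketch of the first example in the comment above -
 * requesting inner 0x8100 VLAN stripping via
 * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2.
 */
static inline void virtchnl_example_strip_inner_8100(struct virtchnl_vlan_setting *setting,
						     u16 vport_id)
{
	setting->inner_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_8100;
	setting->outer_ethertype_setting = 0;
	setting->vport_id = vport_id;
}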
926
927 /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
928 * VF sends VSI id and flags.
929 * PF returns status code in retval.
930 * Note: we assume that broadcast accept mode is always enabled.
931 */
932 struct virtchnl_promisc_info {
933 u16 vsi_id;
934 u16 flags;
935 };
936
937 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
938
939 #define FLAG_VF_UNICAST_PROMISC 0x00000001
940 #define FLAG_VF_MULTICAST_PROMISC 0x00000002
941
942 /* VIRTCHNL_OP_GET_STATS
943 * VF sends this message to request stats for the selected VSI. VF uses
944 * the virtchnl_queue_select struct to specify the VSI. The queue_id
945 * field is ignored by the PF.
946 *
947 * PF replies with struct eth_stats in an external buffer.
948 */
949
950 /* VIRTCHNL_OP_CONFIG_RSS_KEY
951 * VIRTCHNL_OP_CONFIG_RSS_LUT
952 * VF sends these messages to configure RSS. Only supported if both PF
953 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
954 * configuration negotiation. If this is the case, then the RSS fields in
955 * the VF resource struct are valid.
956 * Both the key and LUT are initialized to 0 by the PF, meaning that
957 * RSS is effectively disabled until set up by the VF.
958 */
959 struct virtchnl_rss_key {
960 u16 vsi_id;
961 u16 key_len;
962 u8 key[]; /* RSS hash key, packed bytes */
963 };
964
965 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
966 #define virtchnl_rss_key_LEGACY_SIZEOF 6
967
968 struct virtchnl_rss_lut {
969 u16 vsi_id;
970 u16 lut_entries;
971 u8 lut[]; /* RSS lookup table */
972 };
973
974 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
975 #define virtchnl_rss_lut_LEGACY_SIZEOF 6
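
/* Illustration only (not part of the virtchnl ABI; names are local to this
 * example): key[] and lut[] are flexible arrays, so the buffers sent with
 * VIRTCHNL_OP_CONFIG_RSS_KEY and VIRTCHNL_OP_CONFIG_RSS_LUT must be sized with
 * struct_size(). A minimal sketch for the key message:
 */
static inline size_t virtchnl_example_rss_key_msglen(const struct virtchnl_rss_key *msg)
{
	return struct_size(msg, key, msg->key_len);
}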
976
977 /* VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS
978 * VIRTCHNL_OP_SET_RSS_HASHCFG
979 * VF sends these messages to get and set the hash filter configuration for RSS.
980 * By default, the PF sets these to all possible traffic types that the
981 * hardware supports. The VF can query this value if it wants to change the
982 * traffic types that are hashed by the hardware.
983 */
984 struct virtchnl_rss_hashcfg {
985 /* Bits defined by enum libie_filter_pctype */
986 u64 hashcfg;
987 };
988
989 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hashcfg);
990
991 /* Type of RSS algorithm */
992 enum virtchnl_rss_algorithm {
993 VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
994 VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
995 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
996 VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
997 };
998
999 /* VIRTCHNL_OP_CONFIG_RSS_HFUNC
1000 * VF sends this message to configure the RSS hash function. Only supported
1001 * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
1002 * configuration negotiation.
1003 * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
1004 * by the PF.
1005 */
1006 struct virtchnl_rss_hfunc {
1007 u16 vsi_id;
1008 u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
1009 u32 reserved;
1010 };
1011
1012 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
1013
1014 /* VIRTCHNL_OP_ENABLE_CHANNELS
1015 * VIRTCHNL_OP_DISABLE_CHANNELS
1016 * VF sends these messages to enable or disable channels based on
1017 * the user specified queue count and queue offset for each traffic class.
1018 * This struct encompasses all the information that the PF needs from
1019 * VF to create a channel.
1020 */
1021 struct virtchnl_channel_info {
1022 u16 count; /* number of queues in a channel */
1023 u16 offset; /* queues in a channel start from 'offset' */
1024 u32 pad;
1025 u64 max_tx_rate;
1026 };
1027
1028 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
1029
1030 struct virtchnl_tc_info {
1031 u32 num_tc;
1032 u32 pad;
1033 struct virtchnl_channel_info list[];
1034 };
1035
1036 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
1037 #define virtchnl_tc_info_LEGACY_SIZEOF 24
1038
1039 /* VIRTCHNL_ADD_CLOUD_FILTER
1040 * VIRTCHNL_DEL_CLOUD_FILTER
1041 * VF sends these messages to add or delete a cloud filter based on the
1042 * user specified match and action filters. These structures encompass
1043 * all the information that the PF needs from the VF to add/delete a
1044 * cloud filter.
1045 */
1046
1047 struct virtchnl_l4_spec {
1048 u8 src_mac[ETH_ALEN];
1049 u8 dst_mac[ETH_ALEN];
1050 __be16 vlan_id;
1051 __be16 pad; /* reserved for future use */
1052 __be32 src_ip[4];
1053 __be32 dst_ip[4];
1054 __be16 src_port;
1055 __be16 dst_port;
1056 };
1057
1058 VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
1059
1060 union virtchnl_flow_spec {
1061 struct virtchnl_l4_spec tcp_spec;
1062 u8 buffer[128]; /* reserved for future use */
1063 };
1064
1065 VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
1066
1067 enum virtchnl_action {
1068 /* action types */
1069 VIRTCHNL_ACTION_DROP = 0,
1070 VIRTCHNL_ACTION_TC_REDIRECT,
1071 VIRTCHNL_ACTION_PASSTHRU,
1072 VIRTCHNL_ACTION_QUEUE,
1073 VIRTCHNL_ACTION_Q_REGION,
1074 VIRTCHNL_ACTION_MARK,
1075 VIRTCHNL_ACTION_COUNT,
1076 };
1077
1078 enum virtchnl_flow_type {
1079 /* flow types */
1080 VIRTCHNL_TCP_V4_FLOW = 0,
1081 VIRTCHNL_TCP_V6_FLOW,
1082 };
1083
1084 struct virtchnl_filter {
1085 union virtchnl_flow_spec data;
1086 union virtchnl_flow_spec mask;
1087
1088 /* see enum virtchnl_flow_type */
1089 s32 flow_type;
1090
1091 /* see enum virtchnl_action */
1092 s32 action;
1093 u32 action_meta;
1094 u8 field_flags;
1095 u8 pad[3];
1096 };
1097
1098 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
1099
1100 /* VIRTCHNL_OP_EVENT
1101 * PF sends this message to inform the VF driver of events that may affect it.
1102 * No direct response is expected from the VF, though it may generate other
1103 * messages in response to this one.
1104 */
1105 enum virtchnl_event_codes {
1106 VIRTCHNL_EVENT_UNKNOWN = 0,
1107 VIRTCHNL_EVENT_LINK_CHANGE,
1108 VIRTCHNL_EVENT_RESET_IMPENDING,
1109 VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
1110 };
1111
1112 #define PF_EVENT_SEVERITY_INFO 0
1113 #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
1114
1115 struct virtchnl_pf_event {
1116 /* see enum virtchnl_event_codes */
1117 s32 event;
1118 union {
1119 /* If the PF driver does not support the new speed reporting
1120 * capabilities then use link_event else use link_event_adv to
1121 * get the speed and link information. The ability to understand
1122 * new speeds is indicated by setting the capability flag
1123 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in the vf_cap_flags field
1124 * of the virtchnl_vf_resource struct and can be used to determine
1125 * which link event struct to use below.
1126 */
1127 struct {
1128 enum virtchnl_link_speed link_speed;
1129 bool link_status;
1130 u8 pad[3];
1131 } link_event;
1132 struct {
1133 /* link_speed provided in Mbps */
1134 u32 link_speed;
1135 u8 link_status;
1136 u8 pad[3];
1137 } link_event_adv;
1138 } event_data;
1139
1140 s32 severity;
1141 };
1142
1143 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
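
/* Illustration only (not part of the virtchnl ABI; names are local to this
 * example): a minimal sketch of reading the link speed in Mbps from a
 * VIRTCHNL_EVENT_LINK_CHANGE event, depending on whether
 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED was negotiated. It reuses the example helper
 * sketched after enum virtchnl_link_speed above.
 */
static inline u32 virtchnl_example_link_mbps(const struct virtchnl_pf_event *event,
					     u32 vf_cap_flags)
{
	if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		return event->event_data.link_event_adv.link_speed;
	return virtchnl_example_speed_to_mbps(event->event_data.link_event.link_speed);
}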
1144
1145 /* used to specify if a ceq_idx or aeq_idx is invalid */
1146 #define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
1147 /* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
1148 * VF uses this message to request PF to map RDMA vectors to RDMA queues.
1149 * The request for this originates from the VF RDMA driver through
1150 * a client interface between VF LAN and VF RDMA driver.
1151 * A vector could have both an AEQ and a CEQ attached to it, although
1152 * there is a single AEQ per VF RDMA instance, in which case
1153 * most vectors will have VIRTCHNL_RDMA_INVALID_QUEUE_IDX for the aeq and a
1154 * valid idx for the ceqs. There will never be a case where multiple CEQs are
1155 * attached to a single vector.
1156 * PF configures interrupt mapping and returns status.
1157 */
1158
1159 struct virtchnl_rdma_qv_info {
1160 u32 v_idx; /* msix_vector */
1161 u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1162 u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
1163 u8 itr_idx;
1164 u8 pad[3];
1165 };
1166
1167 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
1168
1169 struct virtchnl_rdma_qvlist_info {
1170 u32 num_vectors;
1171 struct virtchnl_rdma_qv_info qv_info[];
1172 };
1173
1174 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
1175 #define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
1176
1177 /* VF reset states - these are written into the RSTAT register:
1178 * VFGEN_RSTAT on the VF
1179 * When the PF initiates a reset, it writes 0
1180 * When the reset is complete, it writes 1
1181 * When the PF detects that the VF has recovered, it writes 2
1182 * VF checks this register periodically to determine if a reset has occurred,
1183 * then polls it to know when the reset is complete.
1184 * If either the PF or VF reads the register while the hardware
1185 * is in a reset state, it will return 0xDEADBEEF, which, when masked,
1186 * will result in 3.
1187 */
1188 enum virtchnl_vfr_states {
1189 VIRTCHNL_VFR_INPROGRESS = 0,
1190 VIRTCHNL_VFR_COMPLETED,
1191 VIRTCHNL_VFR_VFACTIVE,
1192 };
1193
1194 #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
1195 #define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
1196 #define PROTO_HDR_SHIFT 5
1197 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
1198 #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
1199
1200 /* The VF uses these macros to configure each protocol header.
1201 * They specify which protocol headers and protocol header fields to use,
1202 * based on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
1203 * @param hdr: pointer to a struct virtchnl_proto_hdr
1204 * @param hdr_type: ETH/IPV4/TCP, etc.
1205 * @param field: SRC/DST/TEID/SPI, etc.
1206 */
1207 #define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
1208 ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
1209 #define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
1210 ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
1211 #define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
1212 ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
1213 #define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
1214
1215 #define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1216 (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
1217 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1218 #define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
1219 (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
1220 VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
1221
1222 #define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
1223 ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
1224 #define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
1225 (((hdr)->type) >> PROTO_HDR_SHIFT)
1226 #define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
1227 ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
1228 #define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
1229 (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
1230 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
1231
1232 /* Protocol header type within a packet segment. A segment consists of one or
1233 * more protocol headers that make up a logical group of protocol headers. Each
1234 * logical group of protocol headers encapsulates, or is encapsulated by,
1235 * tunneling or encapsulation protocols used for network virtualization.
1236 */
1237 enum virtchnl_proto_hdr_type {
1238 VIRTCHNL_PROTO_HDR_NONE,
1239 VIRTCHNL_PROTO_HDR_ETH,
1240 VIRTCHNL_PROTO_HDR_S_VLAN,
1241 VIRTCHNL_PROTO_HDR_C_VLAN,
1242 VIRTCHNL_PROTO_HDR_IPV4,
1243 VIRTCHNL_PROTO_HDR_IPV6,
1244 VIRTCHNL_PROTO_HDR_TCP,
1245 VIRTCHNL_PROTO_HDR_UDP,
1246 VIRTCHNL_PROTO_HDR_SCTP,
1247 VIRTCHNL_PROTO_HDR_GTPU_IP,
1248 VIRTCHNL_PROTO_HDR_GTPU_EH,
1249 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
1250 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
1251 VIRTCHNL_PROTO_HDR_PPPOE,
1252 VIRTCHNL_PROTO_HDR_L2TPV3,
1253 VIRTCHNL_PROTO_HDR_ESP,
1254 VIRTCHNL_PROTO_HDR_AH,
1255 VIRTCHNL_PROTO_HDR_PFCP,
1256 };
1257
1258 /* Protocol header field within a protocol header. */
1259 enum virtchnl_proto_hdr_field {
1260 /* ETHER */
1261 VIRTCHNL_PROTO_HDR_ETH_SRC =
1262 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
1263 VIRTCHNL_PROTO_HDR_ETH_DST,
1264 VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
1265 /* S-VLAN */
1266 VIRTCHNL_PROTO_HDR_S_VLAN_ID =
1267 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
1268 /* C-VLAN */
1269 VIRTCHNL_PROTO_HDR_C_VLAN_ID =
1270 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
1271 /* IPV4 */
1272 VIRTCHNL_PROTO_HDR_IPV4_SRC =
1273 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
1274 VIRTCHNL_PROTO_HDR_IPV4_DST,
1275 VIRTCHNL_PROTO_HDR_IPV4_DSCP,
1276 VIRTCHNL_PROTO_HDR_IPV4_TTL,
1277 VIRTCHNL_PROTO_HDR_IPV4_PROT,
1278 /* IPV6 */
1279 VIRTCHNL_PROTO_HDR_IPV6_SRC =
1280 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
1281 VIRTCHNL_PROTO_HDR_IPV6_DST,
1282 VIRTCHNL_PROTO_HDR_IPV6_TC,
1283 VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
1284 VIRTCHNL_PROTO_HDR_IPV6_PROT,
1285 /* TCP */
1286 VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
1287 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
1288 VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
1289 /* UDP */
1290 VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
1291 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
1292 VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
1293 /* SCTP */
1294 VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
1295 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
1296 VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
1297 /* GTPU_IP */
1298 VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
1299 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
1300 /* GTPU_EH */
1301 VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
1302 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
1303 VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
1304 /* PPPOE */
1305 VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
1306 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
1307 /* L2TPV3 */
1308 VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
1309 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
1310 /* ESP */
1311 VIRTCHNL_PROTO_HDR_ESP_SPI =
1312 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
1313 /* AH */
1314 VIRTCHNL_PROTO_HDR_AH_SPI =
1315 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
1316 /* PFCP */
1317 VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
1318 PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
1319 VIRTCHNL_PROTO_HDR_PFCP_SEID,
1320 };
1321
1322 struct virtchnl_proto_hdr {
1323 /* see enum virtchnl_proto_hdr_type */
1324 s32 type;
1325 u32 field_selector; /* a bit mask to select field for header type */
1326 u8 buffer[64];
1327 /**
1328 * Binary buffer in network order for the specific header type.
1329 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
1330 * header is expected to be copied into the buffer.
1331 */
1332 };
1333
1334 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
1335
1336 struct virtchnl_proto_hdrs {
1337 u8 tunnel_level;
1338 u8 pad[3];
1339 /**
1340 * Specifies which layer the protocol headers start from.
1341 * Must be 0 when sending a raw packet request.
1342 * 0 - from the outer layer
1343 * 1 - from the first inner layer
1344 * 2 - from the second inner layer
1345 * ....
1346 **/
1347 u32 count; /* number of proto layers; must be less than VIRTCHNL_MAX_NUM_PROTO_HDRS */
1348 union {
1349 struct virtchnl_proto_hdr
1350 proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
1351 struct {
1352 u16 pkt_len;
1353 u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1354 u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
1355 } raw;
1356 };
1357 };
1358
1359 VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
1360
1361 struct virtchnl_rss_cfg {
1362 struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
1363
1364 /* see enum virtchnl_rss_algorithm; rss algorithm type */
1365 s32 rss_algorithm;
1366 u8 reserved[128]; /* reserved for future use */
1367 };
1368
1369 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
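
/* Illustration only (not part of the virtchnl ABI; the helper name is local to
 * this example): a minimal sketch of building a virtchnl_rss_cfg for
 * VIRTCHNL_OP_ADD_RSS_CFG that hashes on the IPv4 source and destination
 * addresses, using the protocol header macros defined above. The caller is
 * assumed to have zeroed *cfg first.
 */
static inline void virtchnl_example_rss_ipv4(struct virtchnl_rss_cfg *cfg)
{
	struct virtchnl_proto_hdr *hdr = &cfg->proto_hdrs.proto_hdr[0];

	cfg->proto_hdrs.tunnel_level = 0;	/* from the outer layer */
	cfg->proto_hdrs.count = 1;
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
	cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
}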
1370
1371 /* action configuration for FDIR */
1372 struct virtchnl_filter_action {
1373 /* see enum virtchnl_action type */
1374 s32 type;
1375 union {
1376 /* used for queue and qgroup action */
1377 struct {
1378 u16 index;
1379 u8 region;
1380 } queue;
1381 /* used for count action */
1382 struct {
1383 /* share counter ID with other flow rules */
1384 u8 shared;
1385 u32 id; /* counter ID */
1386 } count;
1387 /* used for mark action */
1388 u32 mark_id;
1389 u8 reserve[32];
1390 } act_conf;
1391 };
1392
1393 VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
1394
1395 #define VIRTCHNL_MAX_NUM_ACTIONS 8
1396
1397 struct virtchnl_filter_action_set {
1398 /* number of actions; must be less than VIRTCHNL_MAX_NUM_ACTIONS */
1399 u32 count;
1400 struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
1401 };
1402
1403 VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
1404
1405 /* pattern and action for FDIR rule */
1406 struct virtchnl_fdir_rule {
1407 struct virtchnl_proto_hdrs proto_hdrs;
1408 struct virtchnl_filter_action_set action_set;
1409 };
1410
1411 VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
1412
1413 /* Status returned to VF after VF requests FDIR commands
1414 * VIRTCHNL_FDIR_SUCCESS
1415 * The VF FDIR related request was completed successfully by the PF.
1416 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
1417 *
1418 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
1419 * The OP_ADD_FDIR_FILTER request failed because no hardware resource is available.
1420 *
1421 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
1422 * The OP_ADD_FDIR_FILTER request failed because the rule already exists.
1423 *
1424 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
1425 * The OP_ADD_FDIR_FILTER request failed because it conflicts with an existing rule.
1426 *
1427 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
1428 * The OP_DEL_FDIR_FILTER request failed because the rule does not exist.
1429 *
1430 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
1431 * The OP_ADD_FDIR_FILTER request failed because parameter validation failed
1432 * or the hardware does not support the rule.
1433 *
1434 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
1435 * The OP_ADD/DEL_FDIR_FILTER request failed because rule programming
1436 * timed out.
1437 *
1438 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
1439 * The OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
1440 * for example, the VF queried the counter of a rule that has no counter action.
1441 */
1442 enum virtchnl_fdir_prgm_status {
1443 VIRTCHNL_FDIR_SUCCESS = 0,
1444 VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
1445 VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
1446 VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
1447 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
1448 VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
1449 VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
1450 VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
1451 };
1452
1453 /* VIRTCHNL_OP_ADD_FDIR_FILTER
1454 * VF sends this request to PF by filling out vsi_id,
1455 * validate_only and rule_cfg. PF will return flow_id
1456 * if the request is successfully done and return add_status to VF.
1457 */
1458 struct virtchnl_fdir_add {
1459 u16 vsi_id; /* INPUT */
1460 /*
1461 * 1 to validate an FDIR rule, 0 to create an FDIR rule.
1462 * Validate and create share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
1463 */
1464 u16 validate_only; /* INPUT */
1465 u32 flow_id; /* OUTPUT */
1466 struct virtchnl_fdir_rule rule_cfg; /* INPUT */
1467
1468 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1469 s32 status;
1470 };
1471
1472 VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
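
/* Illustration only (not part of the virtchnl ABI; helper and parameter names
 * are local to this example): a minimal sketch of filling a virtchnl_fdir_add
 * request that steers TCPv4 traffic with a given destination port (host byte
 * order here) to one Rx queue. The caller is assumed to have zeroed *add
 * first; a real driver also fills the other header buffers as its hardware
 * requires.
 */
static inline void virtchnl_example_fdir_tcp_dport(struct virtchnl_fdir_add *add,
						   u16 vsi_id, u16 dport, u16 rx_queue)
{
	struct virtchnl_proto_hdrs *hdrs = &add->rule_cfg.proto_hdrs;
	struct virtchnl_proto_hdr *tcp = &hdrs->proto_hdr[2];
	struct virtchnl_filter_action *act = &add->rule_cfg.action_set.actions[0];

	add->vsi_id = vsi_id;
	add->validate_only = 0;			/* create, not just validate */

	hdrs->count = 3;			/* ETH / IPV4 / TCP */
	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[0], ETH);
	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[1], IPV4);
	VIRTCHNL_SET_PROTO_HDR_TYPE(tcp, TCP);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(tcp, TCP, DST_PORT);
	tcp->buffer[2] = dport >> 8;		/* dst port in network order, */
	tcp->buffer[3] = dport & 0xff;		/* bytes 2-3 of the TCP header */

	add->rule_cfg.action_set.count = 1;
	act->type = VIRTCHNL_ACTION_QUEUE;
	act->act_conf.queue.index = rx_queue;
}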
1473
1474 /* VIRTCHNL_OP_DEL_FDIR_FILTER
1475 * The VF sends this request to the PF, filling in vsi_id
1476 * and flow_id. The PF reports the result to the VF in status.
1477 */
1478 struct virtchnl_fdir_del {
1479 u16 vsi_id; /* INPUT */
1480 u16 pad;
1481 u32 flow_id; /* INPUT */
1482
1483 /* see enum virtchnl_fdir_prgm_status; OUTPUT */
1484 s32 status;
1485 };
1486
1487 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
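/*
 * Example (illustrative sketch): the VF side of VIRTCHNL_OP_DEL_FDIR_FILTER,
 * using the flow_id previously returned by the PF in struct virtchnl_fdir_add.
 * The helper name is hypothetical.
 */
static inline void
virtchnl_example_prep_fdir_del(struct virtchnl_fdir_del *del, u16 vsi_id,
			       u32 flow_id)
{
	del->vsi_id = vsi_id;
	del->pad = 0;
	del->flow_id = flow_id;
	del->status = VIRTCHNL_FDIR_SUCCESS;	/* overwritten by the PF */
}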
1488
1489 #define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP BIT(1)
1490 #define VIRTCHNL_1588_PTP_CAP_READ_PHC BIT(2)
1491
1492 /**
1493 * struct virtchnl_ptp_caps - Defines the PTP caps available to the VF.
1494 * @caps: On send, VF sets what capabilities it requests. On reply, PF
1495 * indicates what has been enabled for this VF. The PF shall not set
1496 * bits which were not requested by the VF.
1497 * @rsvd: Reserved bits for future extension.
1498 *
1499 * Structure that defines the PTP capabilities available to the VF. The VF
1500 * sends VIRTCHNL_OP_1588_PTP_GET_CAPS and must fill in the caps field to
1501 * indicate which capabilities it is requesting. The PF responds with the
1502 * same message, with the virtchnl_ptp_caps structure indicating what has
1503 * been enabled for the VF.
1504 *
1505 * VIRTCHNL_1588_PTP_CAP_RX_TSTAMP indicates that the VF receive queues have
1506 * receive timestamps enabled in the flexible descriptors. Note that this
1507 * requires a VF to also negotiate to enable advanced flexible descriptors in
1508 * the receive path instead of the default legacy descriptor format.
1509 *
1510 * VIRTCHNL_1588_PTP_CAP_READ_PHC indicates that the VF may read the PHC time
1511 * via the VIRTCHNL_OP_1588_PTP_GET_TIME command.
1512 *
1513 * Note that in the future, additional capability flags may be added which
1514 * indicate additional extended support. All fields marked as reserved by this
1515 * header will be set to zero. VF implementations should verify this to ensure
1516 * that future extensions do not break compatibility.
1517 */
1518 struct virtchnl_ptp_caps {
1519 u32 caps;
1520 u8 rsvd[44];
1521 };
1522
1523 VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);
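/*
 * Example (illustrative sketch): requesting both PTP capabilities with
 * VIRTCHNL_OP_1588_PTP_GET_CAPS and checking what the PF granted in its
 * reply. The caller is assumed to have zeroed the structure (including the
 * reserved bytes); the helper names are hypothetical.
 */
static inline void
virtchnl_example_request_ptp_caps(struct virtchnl_ptp_caps *req)
{
	req->caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
		    VIRTCHNL_1588_PTP_CAP_READ_PHC;
}

static inline bool
virtchnl_example_can_read_phc(const struct virtchnl_ptp_caps *reply)
{
	return !!(reply->caps & VIRTCHNL_1588_PTP_CAP_READ_PHC);
}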
1524
1525 /**
1526 * struct virtchnl_phc_time - Contains the 64 bits of PHC clock time in ns.
1527 * @time: PHC time in nanoseconds
1528 * @rsvd: Reserved for future extension
1529 *
1530 * Structure received with VIRTCHNL_OP_1588_PTP_GET_TIME. Contains the 64 bits
1531 * of PHC clock time in nanoseconds.
1532 *
1533 * VIRTCHNL_OP_1588_PTP_GET_TIME may be sent to request the current time of
1534 * the PHC. This op is available in case direct access via the PHC registers
1535 * is not available.
1536 */
1537 struct virtchnl_phc_time {
1538 u64 time;
1539 u8 rsvd[8];
1540 };
1541
1542 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);
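/*
 * Example (illustrative sketch): the PF answers VIRTCHNL_OP_1588_PTP_GET_TIME
 * with a raw nanosecond count; a VF might split it into seconds and
 * nanoseconds as shown below. The helper name is hypothetical.
 */
static inline void
virtchnl_example_phc_time_to_parts(const struct virtchnl_phc_time *t,
				   u64 *secs, u32 *nsecs)
{
	*secs = t->time / 1000000000ULL;
	*nsecs = (u32)(t->time % 1000000000ULL);
}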
1543
1544 struct virtchnl_shaper_bw {
1545 /* Unit is Kbps */
1546 u32 committed;
1547 u32 peak;
1548 };
1549
1550 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
1551
1552 /* VIRTCHNL_OP_GET_QOS_CAPS
1553 * The VF sends this message to get its QoS capabilities, such as the
1554 * TC number, arbiter, and bandwidth.
1555 */
1556 struct virtchnl_qos_cap_elem {
1557 u8 tc_num;
1558 u8 tc_prio;
1559 #define VIRTCHNL_ABITER_STRICT 0
1560 #define VIRTCHNL_ABITER_ETS 2
1561 u8 arbiter;
1562 #define VIRTCHNL_STRICT_WEIGHT 1
1563 u8 weight;
1564 enum virtchnl_bw_limit_type type;
1565 union {
1566 struct virtchnl_shaper_bw shaper;
1567 u8 pad2[32];
1568 };
1569 };
1570
1571 VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
1572
1573 struct virtchnl_qos_cap_list {
1574 u16 vsi_id;
1575 u16 num_elem;
1576 struct virtchnl_qos_cap_elem cap[];
1577 };
1578
1579 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_qos_cap_list);
1580 #define virtchnl_qos_cap_list_LEGACY_SIZEOF 44
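/*
 * Example (illustrative sketch): walking the flexible cap[] array returned
 * for VIRTCHNL_OP_GET_QOS_CAPS to find the element describing a given
 * traffic class. The helper name is hypothetical.
 */
static inline const struct virtchnl_qos_cap_elem *
virtchnl_example_find_tc_cap(const struct virtchnl_qos_cap_list *list, u8 tc)
{
	u16 i;

	for (i = 0; i < list->num_elem; i++)
		if (list->cap[i].tc_num == tc)
			return &list->cap[i];

	return NULL;
}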
1581
1582 /* VIRTCHNL_OP_CONFIG_QUEUE_BW */
1583 struct virtchnl_queue_bw {
1584 u16 queue_id;
1585 u8 tc;
1586 u8 pad;
1587 struct virtchnl_shaper_bw shaper;
1588 };
1589
1590 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
1591
1592 struct virtchnl_queues_bw_cfg {
1593 u16 vsi_id;
1594 u16 num_queues;
1595 struct virtchnl_queue_bw cfg[];
1596 };
1597
1598 VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queues_bw_cfg);
1599 #define virtchnl_queues_bw_cfg_LEGACY_SIZEOF 16
1600
1601 enum virtchnl_queue_type {
1602 VIRTCHNL_QUEUE_TYPE_TX = 0,
1603 VIRTCHNL_QUEUE_TYPE_RX = 1,
1604 };
1605
1606 /* structure to specify a chunk of contiguous queues */
1607 struct virtchnl_queue_chunk {
1608 /* see enum virtchnl_queue_type */
1609 s32 type;
1610 u16 start_queue_id;
1611 u16 num_queues;
1612 };
1613
1614 VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
1615
1616 struct virtchnl_quanta_cfg {
1617 u16 quanta_size;
1618 u16 pad;
1619 struct virtchnl_queue_chunk queue_select;
1620 };
1621
1622 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
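/*
 * Example (illustrative sketch): building a VIRTCHNL_OP_CONFIG_QUANTA request
 * that covers a contiguous range of Tx queues. The helper name is
 * hypothetical.
 */
static inline void
virtchnl_example_fill_quanta_cfg(struct virtchnl_quanta_cfg *cfg,
				 u16 quanta_size, u16 first_txq, u16 num_txq)
{
	cfg->quanta_size = quanta_size;
	cfg->pad = 0;
	cfg->queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
	cfg->queue_select.start_queue_id = first_txq;
	cfg->queue_select.num_queues = num_txq;
}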
1623
1624 #define __vss_byone(p, member, count, old) \
1625 (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
1626
1627 #define __vss_byelem(p, member, count, old) \
1628 (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))
1629
1630 #define __vss_full(p, member, count, old) \
1631 (struct_size(p, member, count) + (old - struct_size(p, member, 0)))
1632
1633 #define __vss(type, func, p, member, count) \
1634 struct type: func(p, member, count, type##_LEGACY_SIZEOF)
1635
1636 #define virtchnl_struct_size(p, m, c) \
1637 _Generic(*p, \
1638 __vss(virtchnl_vf_resource, __vss_full, p, m, c), \
1639 __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \
1640 __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \
1641 __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \
1642 __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \
1643 __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
1644 __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
1645 __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
1646 __vss(virtchnl_qos_cap_list, __vss_byelem, p, m, c), \
1647 __vss(virtchnl_queues_bw_cfg, __vss_byelem, p, m, c), \
1648 __vss(virtchnl_rss_key, __vss_byone, p, m, c), \
1649 __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
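/*
 * Example (illustrative sketch): virtchnl_struct_size() returns the number of
 * bytes a flexible-array message occupies on the wire, preserving the padding
 * implied by the legacy one-element layouts. A VF sizing a
 * VIRTCHNL_OP_ADD_ETH_ADDR buffer for 'n' addresses might use it as shown
 * below; the helper name is hypothetical.
 */
static inline size_t
virtchnl_example_ether_list_size(struct virtchnl_ether_addr_list *veal, u16 n)
{
	return virtchnl_struct_size(veal, list, n);
}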
1650
1651 /**
1652 * virtchnl_vc_validate_vf_msg
1653 * @ver: Virtchnl version info
1654 * @v_opcode: Opcode for the message
1655 * @msg: pointer to the msg buffer
1656 * @msglen: msg length
1657 *
1658 * validate msg format against struct for each opcode
1659 */
1660 static inline int
1661 virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
1662 u8 *msg, u16 msglen)
1663 {
1664 bool err_msg_format = false;
1665 u32 valid_len = 0;
1666
1667 /* Validate message length. */
1668 switch (v_opcode) {
1669 case VIRTCHNL_OP_VERSION:
1670 valid_len = sizeof(struct virtchnl_version_info);
1671 break;
1672 case VIRTCHNL_OP_RESET_VF:
1673 break;
1674 case VIRTCHNL_OP_GET_VF_RESOURCES:
1675 if (VF_IS_V11(ver))
1676 valid_len = sizeof(u32);
1677 break;
1678 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1679 valid_len = sizeof(struct virtchnl_txq_info);
1680 break;
1681 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1682 valid_len = sizeof(struct virtchnl_rxq_info);
1683 break;
1684 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1685 valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
1686 if (msglen >= valid_len) {
1687 struct virtchnl_vsi_queue_config_info *vqc =
1688 (struct virtchnl_vsi_queue_config_info *)msg;
1689 valid_len = virtchnl_struct_size(vqc, qpair,
1690 vqc->num_queue_pairs);
1691 if (vqc->num_queue_pairs == 0)
1692 err_msg_format = true;
1693 }
1694 break;
1695 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1696 valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
1697 if (msglen >= valid_len) {
1698 struct virtchnl_irq_map_info *vimi =
1699 (struct virtchnl_irq_map_info *)msg;
1700 valid_len = virtchnl_struct_size(vimi, vecmap,
1701 vimi->num_vectors);
1702 if (vimi->num_vectors == 0)
1703 err_msg_format = true;
1704 }
1705 break;
1706 case VIRTCHNL_OP_ENABLE_QUEUES:
1707 case VIRTCHNL_OP_DISABLE_QUEUES:
1708 valid_len = sizeof(struct virtchnl_queue_select);
1709 break;
1710 case VIRTCHNL_OP_ADD_ETH_ADDR:
1711 case VIRTCHNL_OP_DEL_ETH_ADDR:
1712 valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
1713 if (msglen >= valid_len) {
1714 struct virtchnl_ether_addr_list *veal =
1715 (struct virtchnl_ether_addr_list *)msg;
1716 valid_len = virtchnl_struct_size(veal, list,
1717 veal->num_elements);
1718 if (veal->num_elements == 0)
1719 err_msg_format = true;
1720 }
1721 break;
1722 case VIRTCHNL_OP_ADD_VLAN:
1723 case VIRTCHNL_OP_DEL_VLAN:
1724 valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
1725 if (msglen >= valid_len) {
1726 struct virtchnl_vlan_filter_list *vfl =
1727 (struct virtchnl_vlan_filter_list *)msg;
1728 valid_len = virtchnl_struct_size(vfl, vlan_id,
1729 vfl->num_elements);
1730 if (vfl->num_elements == 0)
1731 err_msg_format = true;
1732 }
1733 break;
1734 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1735 valid_len = sizeof(struct virtchnl_promisc_info);
1736 break;
1737 case VIRTCHNL_OP_GET_STATS:
1738 valid_len = sizeof(struct virtchnl_queue_select);
1739 break;
1740 case VIRTCHNL_OP_RDMA:
1741 /* These messages are opaque to us and will be validated in
1742 * the RDMA client code. We just need to check for nonzero
1743 * length. The firmware will enforce max length restrictions.
1744 */
1745 if (msglen)
1746 valid_len = msglen;
1747 else
1748 err_msg_format = true;
1749 break;
1750 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
1751 break;
1752 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
1753 valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
1754 if (msglen >= valid_len) {
1755 struct virtchnl_rdma_qvlist_info *qv =
1756 (struct virtchnl_rdma_qvlist_info *)msg;
1757
1758 valid_len = virtchnl_struct_size(qv, qv_info,
1759 qv->num_vectors);
1760 }
1761 break;
1762 case VIRTCHNL_OP_CONFIG_RSS_KEY:
1763 valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
1764 if (msglen >= valid_len) {
1765 struct virtchnl_rss_key *vrk =
1766 (struct virtchnl_rss_key *)msg;
1767 valid_len = virtchnl_struct_size(vrk, key,
1768 vrk->key_len);
1769 }
1770 break;
1771 case VIRTCHNL_OP_CONFIG_RSS_LUT:
1772 valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
1773 if (msglen >= valid_len) {
1774 struct virtchnl_rss_lut *vrl =
1775 (struct virtchnl_rss_lut *)msg;
1776 valid_len = virtchnl_struct_size(vrl, lut,
1777 vrl->lut_entries);
1778 }
1779 break;
1780 case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
1781 valid_len = sizeof(struct virtchnl_rss_hfunc);
1782 break;
1783 case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
1784 break;
1785 case VIRTCHNL_OP_SET_RSS_HASHCFG:
1786 valid_len = sizeof(struct virtchnl_rss_hashcfg);
1787 break;
1788 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1789 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1790 break;
1791 case VIRTCHNL_OP_REQUEST_QUEUES:
1792 valid_len = sizeof(struct virtchnl_vf_res_request);
1793 break;
1794 case VIRTCHNL_OP_ENABLE_CHANNELS:
1795 valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
1796 if (msglen >= valid_len) {
1797 struct virtchnl_tc_info *vti =
1798 (struct virtchnl_tc_info *)msg;
1799 valid_len = virtchnl_struct_size(vti, list,
1800 vti->num_tc);
1801 if (vti->num_tc == 0)
1802 err_msg_format = true;
1803 }
1804 break;
1805 case VIRTCHNL_OP_DISABLE_CHANNELS:
1806 break;
1807 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
1808 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
1809 valid_len = sizeof(struct virtchnl_filter);
1810 break;
1811 case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
1812 break;
1813 case VIRTCHNL_OP_ADD_RSS_CFG:
1814 case VIRTCHNL_OP_DEL_RSS_CFG:
1815 valid_len = sizeof(struct virtchnl_rss_cfg);
1816 break;
1817 case VIRTCHNL_OP_ADD_FDIR_FILTER:
1818 valid_len = sizeof(struct virtchnl_fdir_add);
1819 break;
1820 case VIRTCHNL_OP_DEL_FDIR_FILTER:
1821 valid_len = sizeof(struct virtchnl_fdir_del);
1822 break;
1823 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
1824 break;
1825 case VIRTCHNL_OP_ADD_VLAN_V2:
1826 case VIRTCHNL_OP_DEL_VLAN_V2:
1827 valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
1828 if (msglen >= valid_len) {
1829 struct virtchnl_vlan_filter_list_v2 *vfl =
1830 (struct virtchnl_vlan_filter_list_v2 *)msg;
1831
1832 valid_len = virtchnl_struct_size(vfl, filters,
1833 vfl->num_elements);
1834
1835 if (vfl->num_elements == 0) {
1836 err_msg_format = true;
1837 break;
1838 }
1839 }
1840 break;
1841 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1842 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1843 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1844 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1845 valid_len = sizeof(struct virtchnl_vlan_setting);
1846 break;
1847 case VIRTCHNL_OP_GET_QOS_CAPS:
1848 break;
1849 case VIRTCHNL_OP_CONFIG_QUEUE_BW:
1850 valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF;
1851 if (msglen >= valid_len) {
1852 struct virtchnl_queues_bw_cfg *q_bw =
1853 (struct virtchnl_queues_bw_cfg *)msg;
1854
1855 valid_len = virtchnl_struct_size(q_bw, cfg,
1856 q_bw->num_queues);
1857 if (q_bw->num_queues == 0) {
1858 err_msg_format = true;
1859 break;
1860 }
1861 }
1862 break;
1863 case VIRTCHNL_OP_CONFIG_QUANTA:
1864 valid_len = sizeof(struct virtchnl_quanta_cfg);
1865 if (msglen >= valid_len) {
1866 struct virtchnl_quanta_cfg *q_quanta =
1867 (struct virtchnl_quanta_cfg *)msg;
1868
1869 if (q_quanta->quanta_size == 0 ||
1870 q_quanta->queue_select.num_queues == 0) {
1871 err_msg_format = true;
1872 break;
1873 }
1874 }
1875 break;
1876 case VIRTCHNL_OP_1588_PTP_GET_CAPS:
1877 valid_len = sizeof(struct virtchnl_ptp_caps);
1878 break;
1879 case VIRTCHNL_OP_1588_PTP_GET_TIME:
1880 valid_len = sizeof(struct virtchnl_phc_time);
1881 break;
1882 /* These are always errors coming from the VF. */
1883 case VIRTCHNL_OP_EVENT:
1884 case VIRTCHNL_OP_UNKNOWN:
1885 default:
1886 return VIRTCHNL_STATUS_ERR_PARAM;
1887 }
1888 /* few more checks */
1889 if (err_msg_format || valid_len != msglen)
1890 return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
1891
1892 return 0;
1893 }
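/*
 * Example (illustrative sketch): a PF driver would typically run each mailbox
 * message received from a VF through virtchnl_vc_validate_vf_msg() before
 * acting on it, reporting any nonzero result back to the VF in v_retval. The
 * helper name is hypothetical.
 */
static inline bool
virtchnl_example_vf_msg_ok(struct virtchnl_version_info *ver, u32 v_opcode,
			   u8 *msg, u16 msglen)
{
	return virtchnl_vc_validate_vf_msg(ver, v_opcode, msg, msglen) ==
	       VIRTCHNL_STATUS_SUCCESS;
}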
1894 #endif /* _VIRTCHNL_H_ */
1895