xref: /linux/drivers/net/ethernet/intel/idpf/virtchnl2.h (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #ifndef _VIRTCHNL2_H_
5 #define _VIRTCHNL2_H_
6 
7 #include <linux/if_ether.h>
8 
9 /* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
10  * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
11  * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
12  *
13  * PF/VF uses the virtchnl2 interface defined in this header file to communicate
14  * with device Control Plane (CP). Driver and the CP may run on different
15  * platforms with different endianness. To avoid byte order discrepancies,
16  * all the structures in this header follow little-endian format.
17  *
18  * This is an interface definition file where existing enums and their values
19  * must remain unchanged over time, so we specify explicit values for all enums.
20  */
21 
22 /* This macro is used to generate compilation errors if a structure
23  * is not exactly the correct length.
24  */
25 #define VIRTCHNL2_CHECK_STRUCT_LEN(n, X)	\
26 	static_assert((n) == sizeof(struct X))
27 
28 /* A new major set of opcodes is introduced here, leaving room for the old
29  * miscellaneous opcodes to be added in the future. These opcodes may only be
30  * used if both the PF/VF and the CP have successfully negotiated the VIRTCHNL
31  * version as 2.0 during the VIRTCHNL2_OP_VERSION exchange.
32  */
33 enum virtchnl2_op {
34 	VIRTCHNL2_OP_UNKNOWN			= 0,
35 	VIRTCHNL2_OP_VERSION			= 1,
36 	VIRTCHNL2_OP_GET_CAPS			= 500,
37 	VIRTCHNL2_OP_CREATE_VPORT		= 501,
38 	VIRTCHNL2_OP_DESTROY_VPORT		= 502,
39 	VIRTCHNL2_OP_ENABLE_VPORT		= 503,
40 	VIRTCHNL2_OP_DISABLE_VPORT		= 504,
41 	VIRTCHNL2_OP_CONFIG_TX_QUEUES		= 505,
42 	VIRTCHNL2_OP_CONFIG_RX_QUEUES		= 506,
43 	VIRTCHNL2_OP_ENABLE_QUEUES		= 507,
44 	VIRTCHNL2_OP_DISABLE_QUEUES		= 508,
45 	VIRTCHNL2_OP_ADD_QUEUES			= 509,
46 	VIRTCHNL2_OP_DEL_QUEUES			= 510,
47 	VIRTCHNL2_OP_MAP_QUEUE_VECTOR		= 511,
48 	VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR		= 512,
49 	VIRTCHNL2_OP_GET_RSS_KEY		= 513,
50 	VIRTCHNL2_OP_SET_RSS_KEY		= 514,
51 	VIRTCHNL2_OP_GET_RSS_LUT		= 515,
52 	VIRTCHNL2_OP_SET_RSS_LUT		= 516,
53 	VIRTCHNL2_OP_GET_RSS_HASH		= 517,
54 	VIRTCHNL2_OP_SET_RSS_HASH		= 518,
55 	VIRTCHNL2_OP_SET_SRIOV_VFS		= 519,
56 	VIRTCHNL2_OP_ALLOC_VECTORS		= 520,
57 	VIRTCHNL2_OP_DEALLOC_VECTORS		= 521,
58 	VIRTCHNL2_OP_EVENT			= 522,
59 	VIRTCHNL2_OP_GET_STATS			= 523,
60 	VIRTCHNL2_OP_RESET_VF			= 524,
61 	VIRTCHNL2_OP_GET_EDT_CAPS		= 525,
62 	VIRTCHNL2_OP_GET_PTYPE_INFO		= 526,
63 	/* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
64 	 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
65 	 */
66 	VIRTCHNL2_OP_RDMA			= 529,
67 	/* Opcodes 530 through 533 are reserved. */
68 	VIRTCHNL2_OP_LOOPBACK			= 534,
69 	VIRTCHNL2_OP_ADD_MAC_ADDR		= 535,
70 	VIRTCHNL2_OP_DEL_MAC_ADDR		= 536,
71 	VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE	= 537,
72 
73 	/* TimeSync opcodes */
74 	VIRTCHNL2_OP_PTP_GET_CAPS			= 541,
75 	VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP		= 542,
76 	VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME		= 543,
77 	VIRTCHNL2_OP_PTP_GET_CROSS_TIME			= 544,
78 	VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME		= 545,
79 	VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE		= 546,
80 	VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME		= 547,
81 	VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS	= 548,
82 	VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS		= 549,
83 	/* Opcode 550 is reserved */
84 	VIRTCHNL2_OP_ADD_FLOW_RULE			= 551,
85 	VIRTCHNL2_OP_GET_FLOW_RULE			= 552,
86 	VIRTCHNL2_OP_DEL_FLOW_RULE			= 553,
87 };
88 
89 /**
90  * enum virtchnl2_vport_type - Type of virtual port.
91  * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type.
92  */
93 enum virtchnl2_vport_type {
94 	VIRTCHNL2_VPORT_TYPE_DEFAULT		= 0,
95 };
96 
97 /**
98  * enum virtchnl2_queue_model - Type of queue model.
99  * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model.
100  * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model.
101  *
102  * In the single queue model, the same transmit descriptor queue is used by
103  * software to post descriptors to hardware and by hardware to post completed
104  * descriptors to software.
105  * Likewise, the same receive descriptor queue is used by hardware to post
106  * completions to software and by software to post buffers to hardware.
107  *
108  * In the split queue model, hardware uses transmit completion queues to post
109  * descriptor/buffer completions to software, while software uses transmit
110  * descriptor queues to post descriptors to hardware.
111  * Likewise, hardware posts descriptor completions to the receive descriptor
112  * queue, while software uses receive buffer queues to post buffers to hardware.
113  */
114 enum virtchnl2_queue_model {
115 	VIRTCHNL2_QUEUE_MODEL_SINGLE		= 0,
116 	VIRTCHNL2_QUEUE_MODEL_SPLIT		= 1,
117 };
118 
119 /* Checksum offload capability flags */
120 enum virtchnl2_cap_txrx_csum {
121 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4		= BIT(0),
122 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	= BIT(1),
123 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	= BIT(2),
124 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	= BIT(3),
125 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	= BIT(4),
126 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	= BIT(5),
127 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	= BIT(6),
128 	VIRTCHNL2_CAP_TX_CSUM_GENERIC		= BIT(7),
129 	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		= BIT(8),
130 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	= BIT(9),
131 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	= BIT(10),
132 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	= BIT(11),
133 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	= BIT(12),
134 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	= BIT(13),
135 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	= BIT(14),
136 	VIRTCHNL2_CAP_RX_CSUM_GENERIC		= BIT(15),
137 	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	= BIT(16),
138 	VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL	= BIT(17),
139 	VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL	= BIT(18),
140 	VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL	= BIT(19),
141 	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL	= BIT(20),
142 	VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL	= BIT(21),
143 	VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL	= BIT(22),
144 	VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL	= BIT(23),
145 };
146 
147 /* Segmentation offload capability flags */
148 enum virtchnl2_cap_seg {
149 	VIRTCHNL2_CAP_SEG_IPV4_TCP		= BIT(0),
150 	VIRTCHNL2_CAP_SEG_IPV4_UDP		= BIT(1),
151 	VIRTCHNL2_CAP_SEG_IPV4_SCTP		= BIT(2),
152 	VIRTCHNL2_CAP_SEG_IPV6_TCP		= BIT(3),
153 	VIRTCHNL2_CAP_SEG_IPV6_UDP		= BIT(4),
154 	VIRTCHNL2_CAP_SEG_IPV6_SCTP		= BIT(5),
155 	VIRTCHNL2_CAP_SEG_GENERIC		= BIT(6),
156 	VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL	= BIT(7),
157 	VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL	= BIT(8),
158 };
159 
160 /* Receive Side Scaling and Flow Steering Flow type capability flags */
161 enum virtchnl2_flow_types {
162 	VIRTCHNL2_FLOW_IPV4_TCP		= BIT(0),
163 	VIRTCHNL2_FLOW_IPV4_UDP		= BIT(1),
164 	VIRTCHNL2_FLOW_IPV4_SCTP	= BIT(2),
165 	VIRTCHNL2_FLOW_IPV4_OTHER	= BIT(3),
166 	VIRTCHNL2_FLOW_IPV6_TCP		= BIT(4),
167 	VIRTCHNL2_FLOW_IPV6_UDP		= BIT(5),
168 	VIRTCHNL2_FLOW_IPV6_SCTP	= BIT(6),
169 	VIRTCHNL2_FLOW_IPV6_OTHER	= BIT(7),
170 	VIRTCHNL2_FLOW_IPV4_AH		= BIT(8),
171 	VIRTCHNL2_FLOW_IPV4_ESP		= BIT(9),
172 	VIRTCHNL2_FLOW_IPV4_AH_ESP	= BIT(10),
173 	VIRTCHNL2_FLOW_IPV6_AH		= BIT(11),
174 	VIRTCHNL2_FLOW_IPV6_ESP		= BIT(12),
175 	VIRTCHNL2_FLOW_IPV6_AH_ESP	= BIT(13),
176 };
177 
178 /* Header split capability flags */
179 enum virtchnl2_cap_rx_hsplit_at {
180 	/* for prepended metadata  */
181 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L2		= BIT(0),
182 	/* all VLANs go into header buffer */
183 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L3		= BIT(1),
184 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4		= BIT(2),
185 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6		= BIT(3),
186 };
187 
188 /* Receive Side Coalescing offload capability flags */
189 enum virtchnl2_cap_rsc {
190 	VIRTCHNL2_CAP_RSC_IPV4_TCP		= BIT(0),
191 	VIRTCHNL2_CAP_RSC_IPV4_SCTP		= BIT(1),
192 	VIRTCHNL2_CAP_RSC_IPV6_TCP		= BIT(2),
193 	VIRTCHNL2_CAP_RSC_IPV6_SCTP		= BIT(3),
194 };
195 
196 /* Other capability flags */
197 enum virtchnl2_cap_other {
198 	VIRTCHNL2_CAP_RDMA			= BIT_ULL(0),
199 	VIRTCHNL2_CAP_SRIOV			= BIT_ULL(1),
200 	VIRTCHNL2_CAP_MACFILTER			= BIT_ULL(2),
201 	/* Other capability 3 is available.
202 	 * Bit 4: queue-based scheduling using the split queue model.
203 	 */
204 	VIRTCHNL2_CAP_SPLITQ_QSCHED		= BIT_ULL(4),
205 	VIRTCHNL2_CAP_CRC			= BIT_ULL(5),
206 	VIRTCHNL2_CAP_ADQ			= BIT_ULL(6),
207 	VIRTCHNL2_CAP_WB_ON_ITR			= BIT_ULL(7),
208 	VIRTCHNL2_CAP_PROMISC			= BIT_ULL(8),
209 	VIRTCHNL2_CAP_LINK_SPEED		= BIT_ULL(9),
210 	VIRTCHNL2_CAP_INLINE_IPSEC		= BIT_ULL(10),
211 	VIRTCHNL2_CAP_LARGE_NUM_QUEUES		= BIT_ULL(11),
212 	VIRTCHNL2_CAP_VLAN			= BIT_ULL(12),
213 	VIRTCHNL2_CAP_PTP			= BIT_ULL(13),
214 	/* EDT: Earliest Departure Time capability used for Timing Wheel */
215 	VIRTCHNL2_CAP_EDT			= BIT_ULL(14),
216 	VIRTCHNL2_CAP_ADV_RSS			= BIT_ULL(15),
217 	/* Other capability 16 is available */
218 	VIRTCHNL2_CAP_RX_FLEX_DESC		= BIT_ULL(17),
219 	VIRTCHNL2_CAP_PTYPE			= BIT_ULL(18),
220 	VIRTCHNL2_CAP_LOOPBACK			= BIT_ULL(19),
221 	/* Other capability 20 is reserved */
222 	VIRTCHNL2_CAP_FLOW_STEER		= BIT_ULL(21),
223 	VIRTCHNL2_CAP_LAN_MEMORY_REGIONS	= BIT_ULL(22),
224 
225 	/* this must be the last capability */
226 	VIRTCHNL2_CAP_OEM			= BIT_ULL(63),
227 };
228 
229 /**
230  * enum virtchnl2_action_types - Available actions for sideband flow steering
231  * @VIRTCHNL2_ACTION_DROP: Drop the packet
232  * @VIRTCHNL2_ACTION_PASSTHRU: Forward the packet to the next classifier/stage
233  * @VIRTCHNL2_ACTION_QUEUE: Forward the packet to a receive queue
234  * @VIRTCHNL2_ACTION_Q_GROUP: Forward the packet to a receive queue group
235  * @VIRTCHNL2_ACTION_MARK: Mark the packet with specific marker value
236  * @VIRTCHNL2_ACTION_COUNT: Increment the corresponding counter
237  */
238 
239 enum virtchnl2_action_types {
240 	VIRTCHNL2_ACTION_DROP		= BIT(0),
241 	VIRTCHNL2_ACTION_PASSTHRU	= BIT(1),
242 	VIRTCHNL2_ACTION_QUEUE		= BIT(2),
243 	VIRTCHNL2_ACTION_Q_GROUP	= BIT(3),
244 	VIRTCHNL2_ACTION_MARK		= BIT(4),
245 	VIRTCHNL2_ACTION_COUNT		= BIT(5),
246 };
247 
248 /* underlying device type */
249 enum virtchl2_device_type {
250 	VIRTCHNL2_MEV_DEVICE			= 0,
251 };
252 
253 /**
254  * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes.
255  * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode i.e. inorder
256  *				    completions where descriptors and buffers
257  *				    are completed at the same time.
258  * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out of order
259  *				   packet processing where descriptors are
260  *				   cleaned in order, but buffers can be
261  *				   completed out of order.
262  */
263 enum virtchnl2_txq_sched_mode {
264 	VIRTCHNL2_TXQ_SCHED_MODE_QUEUE		= 0,
265 	VIRTCHNL2_TXQ_SCHED_MODE_FLOW		= 1,
266 };
267 
268 /**
269  * enum virtchnl2_rxq_flags - Receive Queue Feature flags.
270  * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag.
271  * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag.
272  * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed
273  *					by hardware immediately after processing
274  *					each packet.
275  * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size.
276  * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size.
277  */
278 enum virtchnl2_rxq_flags {
279 	VIRTCHNL2_RXQ_RSC			= BIT(0),
280 	VIRTCHNL2_RXQ_HDR_SPLIT			= BIT(1),
281 	VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK	= BIT(2),
282 	VIRTCHNL2_RX_DESC_SIZE_16BYTE		= BIT(3),
283 	VIRTCHNL2_RX_DESC_SIZE_32BYTE		= BIT(4),
284 };
285 
286 /* Type of RSS algorithm */
287 enum virtchnl2_rss_alg {
288 	VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
289 	VIRTCHNL2_RSS_ALG_R_ASYMMETRIC		= 1,
290 	VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
291 	VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC		= 3,
292 };
293 
294 /* Type of event */
295 enum virtchnl2_event_codes {
296 	VIRTCHNL2_EVENT_UNKNOWN			= 0,
297 	VIRTCHNL2_EVENT_LINK_CHANGE		= 1,
298 	/* Event type 2, 3 are reserved */
299 };
300 
301 /* Transmit and Receive queue types are valid in legacy as well as split queue
302  * models. With Split Queue model, 2 additional types are introduced -
303  * TX_COMPLETION and RX_BUFFER. In the split queue model, the receive queue
304  * type corresponds to the queue where hardware posts completions.
305  */
306 enum virtchnl2_queue_type {
307 	VIRTCHNL2_QUEUE_TYPE_TX			= 0,
308 	VIRTCHNL2_QUEUE_TYPE_RX			= 1,
309 	VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION	= 2,
310 	VIRTCHNL2_QUEUE_TYPE_RX_BUFFER		= 3,
311 	VIRTCHNL2_QUEUE_TYPE_CONFIG_TX		= 4,
312 	VIRTCHNL2_QUEUE_TYPE_CONFIG_RX		= 5,
313 	/* Queue types 6, 7, 8, 9 are reserved */
314 	VIRTCHNL2_QUEUE_TYPE_MBX_TX		= 10,
315 	VIRTCHNL2_QUEUE_TYPE_MBX_RX		= 11,
316 };
317 
318 /* Interrupt throttling rate index */
319 enum virtchnl2_itr_idx {
320 	VIRTCHNL2_ITR_IDX_0			= 0,
321 	VIRTCHNL2_ITR_IDX_1			= 1,
322 };
323 
324 /**
325  * enum virtchnl2_mac_addr_type - MAC address types.
326  * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the
327  *				primary/device unicast MAC address filter for
328  *				VIRTCHNL2_OP_ADD_MAC_ADDR and
329  *				VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the
330  *				underlying control plane function to accurately
331  *				track the MAC address and for VM/function reset.
332  *
333  * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra
334  *			      unicast and/or multicast filters that are being
335  *			      added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or
336  *			      VIRTCHNL2_OP_DEL_MAC_ADDR.
337  */
338 enum virtchnl2_mac_addr_type {
339 	VIRTCHNL2_MAC_ADDR_PRIMARY		= 1,
340 	VIRTCHNL2_MAC_ADDR_EXTRA		= 2,
341 };
342 
343 /* Flags used for promiscuous mode */
344 enum virtchnl2_promisc_flags {
345 	VIRTCHNL2_UNICAST_PROMISC		= BIT(0),
346 	VIRTCHNL2_MULTICAST_PROMISC		= BIT(1),
347 };
348 
349 /* Protocol header type within a packet segment. A segment consists of one or
350  * more protocol headers that make up a logical group of protocol headers. Each
351  * logical group of protocol headers encapsulates or is encapsulated using/by
352  * tunneling or encapsulation protocols for network virtualization.
353  */
354 enum virtchnl2_proto_hdr_type {
355 	/* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
356 	VIRTCHNL2_PROTO_HDR_ANY			= 0,
357 	VIRTCHNL2_PROTO_HDR_PRE_MAC		= 1,
358 	/* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
359 	VIRTCHNL2_PROTO_HDR_MAC			= 2,
360 	VIRTCHNL2_PROTO_HDR_POST_MAC		= 3,
361 	VIRTCHNL2_PROTO_HDR_ETHERTYPE		= 4,
362 	VIRTCHNL2_PROTO_HDR_VLAN		= 5,
363 	VIRTCHNL2_PROTO_HDR_SVLAN		= 6,
364 	VIRTCHNL2_PROTO_HDR_CVLAN		= 7,
365 	VIRTCHNL2_PROTO_HDR_MPLS		= 8,
366 	VIRTCHNL2_PROTO_HDR_UMPLS		= 9,
367 	VIRTCHNL2_PROTO_HDR_MMPLS		= 10,
368 	VIRTCHNL2_PROTO_HDR_PTP			= 11,
369 	VIRTCHNL2_PROTO_HDR_CTRL		= 12,
370 	VIRTCHNL2_PROTO_HDR_LLDP		= 13,
371 	VIRTCHNL2_PROTO_HDR_ARP			= 14,
372 	VIRTCHNL2_PROTO_HDR_ECP			= 15,
373 	VIRTCHNL2_PROTO_HDR_EAPOL		= 16,
374 	VIRTCHNL2_PROTO_HDR_PPPOD		= 17,
375 	VIRTCHNL2_PROTO_HDR_PPPOE		= 18,
376 	/* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
377 	VIRTCHNL2_PROTO_HDR_IPV4		= 19,
378 	/* IPv4 and IPv6 Fragment header types are only associated to
379 	 * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
380 	 * cannot be used independently.
381 	 */
382 	/* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
383 	VIRTCHNL2_PROTO_HDR_IPV4_FRAG		= 20,
384 	/* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
385 	VIRTCHNL2_PROTO_HDR_IPV6		= 21,
386 	/* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
387 	VIRTCHNL2_PROTO_HDR_IPV6_FRAG		= 22,
388 	VIRTCHNL2_PROTO_HDR_IPV6_EH		= 23,
389 	/* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
390 	VIRTCHNL2_PROTO_HDR_UDP			= 24,
391 	/* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
392 	VIRTCHNL2_PROTO_HDR_TCP			= 25,
393 	/* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
394 	VIRTCHNL2_PROTO_HDR_SCTP		= 26,
395 	/* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
396 	VIRTCHNL2_PROTO_HDR_ICMP		= 27,
397 	/* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
398 	VIRTCHNL2_PROTO_HDR_ICMPV6		= 28,
399 	VIRTCHNL2_PROTO_HDR_IGMP		= 29,
400 	VIRTCHNL2_PROTO_HDR_AH			= 30,
401 	VIRTCHNL2_PROTO_HDR_ESP			= 31,
402 	VIRTCHNL2_PROTO_HDR_IKE			= 32,
403 	VIRTCHNL2_PROTO_HDR_NATT_KEEP		= 33,
404 	/* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
405 	VIRTCHNL2_PROTO_HDR_PAY			= 34,
406 	VIRTCHNL2_PROTO_HDR_L2TPV2		= 35,
407 	VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL	= 36,
408 	VIRTCHNL2_PROTO_HDR_L2TPV3		= 37,
409 	VIRTCHNL2_PROTO_HDR_GTP			= 38,
410 	VIRTCHNL2_PROTO_HDR_GTP_EH		= 39,
411 	VIRTCHNL2_PROTO_HDR_GTPCV2		= 40,
412 	VIRTCHNL2_PROTO_HDR_GTPC_TEID		= 41,
413 	VIRTCHNL2_PROTO_HDR_GTPU		= 42,
414 	VIRTCHNL2_PROTO_HDR_GTPU_UL		= 43,
415 	VIRTCHNL2_PROTO_HDR_GTPU_DL		= 44,
416 	VIRTCHNL2_PROTO_HDR_ECPRI		= 45,
417 	VIRTCHNL2_PROTO_HDR_VRRP		= 46,
418 	VIRTCHNL2_PROTO_HDR_OSPF		= 47,
419 	/* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
420 	VIRTCHNL2_PROTO_HDR_TUN			= 48,
421 	VIRTCHNL2_PROTO_HDR_GRE			= 49,
422 	VIRTCHNL2_PROTO_HDR_NVGRE		= 50,
423 	VIRTCHNL2_PROTO_HDR_VXLAN		= 51,
424 	VIRTCHNL2_PROTO_HDR_VXLAN_GPE		= 52,
425 	VIRTCHNL2_PROTO_HDR_GENEVE		= 53,
426 	VIRTCHNL2_PROTO_HDR_NSH			= 54,
427 	VIRTCHNL2_PROTO_HDR_QUIC		= 55,
428 	VIRTCHNL2_PROTO_HDR_PFCP		= 56,
429 	VIRTCHNL2_PROTO_HDR_PFCP_NODE		= 57,
430 	VIRTCHNL2_PROTO_HDR_PFCP_SESSION	= 58,
431 	VIRTCHNL2_PROTO_HDR_RTP			= 59,
432 	VIRTCHNL2_PROTO_HDR_ROCE		= 60,
433 	VIRTCHNL2_PROTO_HDR_ROCEV1		= 61,
434 	VIRTCHNL2_PROTO_HDR_ROCEV2		= 62,
435 	/* Protocol ids up to 32767 are reserved.
436 	 * 32768 - 65534 are used for user defined protocol ids.
437 	 * VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id.
438 	 */
439 	VIRTCHNL2_PROTO_HDR_NO_PROTO		= 65535,
440 };
441 
442 enum virtchl2_version {
443 	VIRTCHNL2_VERSION_MINOR_0		= 0,
444 	VIRTCHNL2_VERSION_MAJOR_2		= 2,
445 };
446 
447 /**
448  * struct virtchnl2_edt_caps - Get EDT granularity and time horizon.
449  * @tstamp_granularity_ns: Timestamp granularity in nanoseconds.
450  * @time_horizon_ns: Total time window in nanoseconds.
451  *
452  * Associated with VIRTCHNL2_OP_GET_EDT_CAPS.
453  */
454 struct virtchnl2_edt_caps {
455 	__le64 tstamp_granularity_ns;
456 	__le64 time_horizon_ns;
457 };
458 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_edt_caps);
459 
460 /**
461  * struct virtchnl2_version_info - Version information.
462  * @major: Major version.
463  * @minor: Minor version.
464  *
465  * PF/VF posts its version number to the CP. CP responds with its version number
466  * in the same format, along with a return code.
467  * If there is a major version mismatch, then the PF/VF cannot operate.
468  * If there is a minor version mismatch, then the PF/VF can operate but should
469  * add a warning to the system log.
470  *
471  * This version opcode MUST always be specified as == 1, regardless of other
472  * changes in the API. The CP must always respond to this message without
473  * error regardless of version mismatch.
474  *
475  * Associated with VIRTCHNL2_OP_VERSION.
476  */
477 struct virtchnl2_version_info {
478 	__le32 major;
479 	__le32 minor;
480 };
481 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
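
/* A minimal usage sketch of the version handshake (illustrative only; the
 * mailbox-send helper named here is hypothetical, not part of this interface):
 *
 *	struct virtchnl2_version_info ver = {
 *		.major = cpu_to_le32(VIRTCHNL2_VERSION_MAJOR_2),
 *		.minor = cpu_to_le32(VIRTCHNL2_VERSION_MINOR_0),
 *	};
 *
 *	// send_mbx_msg(adapter, VIRTCHNL2_OP_VERSION, &ver, sizeof(ver));
 *	// On reply, le32_to_cpu(ver.major) must equal the requested major
 *	// version for the driver to continue loading.
 */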
482 
483 /**
484  * struct virtchnl2_get_capabilities - Capabilities info.
485  * @csum_caps: See enum virtchnl2_cap_txrx_csum.
486  * @seg_caps: See enum virtchnl2_cap_seg.
487  * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
488  * @rsc_caps: See enum virtchnl2_cap_rsc.
489  * @rss_caps: See enum virtchnl2_flow_types.
490  * @other_caps: See enum virtchnl2_cap_other.
491  * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
492  *		     provided by CP.
493  * @mailbox_vector_id: Mailbox vector id.
494  * @num_allocated_vectors: Maximum number of allocated vectors for the device.
495  * @max_rx_q: Maximum number of supported Rx queues.
496  * @max_tx_q: Maximum number of supported Tx queues.
497  * @max_rx_bufq: Maximum number of supported buffer queues.
498  * @max_tx_complq: Maximum number of supported completion queues.
499  * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP
500  *		   responds with the maximum VFs granted.
501  * @max_vports: Maximum number of vports that can be supported.
502  * @default_num_vports: Default number of vports driver should allocate on load.
503  * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes.
504  * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be
505  *			    sent per transmit packet without needing to be
506  *			    linearized.
507  * @pad: Padding.
508  * @reserved: Reserved.
509  * @device_type: See enum virtchl2_device_type.
510  * @min_sso_packet_len: Min packet length supported by device for single
511  *			segment offload.
512  * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
513  *			 an LSO.
514  * @num_rdma_allocated_vectors: Maximum number of allocated RDMA vectors for
515  *				the device.
516  * @pad1: Padding for future extensions.
517  *
518  * Dataplane driver sends this message to CP to negotiate capabilities and
519  * provides a virtchnl2_get_capabilities structure with its desired
520  * capabilities, max_sriov_vfs and num_allocated_vectors.
521  * CP responds with a virtchnl2_get_capabilities structure updated
522  * with allowed capabilities and the other fields as below.
523  * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs
524  * that can be created by this PF. For any other value 'n', CP responds
525  * with max_sriov_vfs set to min(n, x) where x is the max number of VFs
526  * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.
527  * If dataplane driver sets num_allocated_vectors as 0, CP will respond with 1
528  * which is default vector associated with the default mailbox. For any other
529  * value 'n', CP responds with a value <= n based on the CP's policy of
530  * max number of vectors for a PF.
531  * CP will respond with the vector ID of mailbox allocated to the PF in
532  * mailbox_vector_id and the number of itr index registers in itr_idx_map.
533  * It also responds with the default number of vports that the dataplane
534  * driver should come up with in default_num_vports and the maximum number of
535  * vports that can be supported in max_vports.
536  *
537  * Associated with VIRTCHNL2_OP_GET_CAPS.
538  */
539 struct virtchnl2_get_capabilities {
540 	__le32 csum_caps;
541 	__le32 seg_caps;
542 	__le32 hsplit_caps;
543 	__le32 rsc_caps;
544 	__le64 rss_caps;
545 	__le64 other_caps;
546 	__le32 mailbox_dyn_ctl;
547 	__le16 mailbox_vector_id;
548 	__le16 num_allocated_vectors;
549 	__le16 max_rx_q;
550 	__le16 max_tx_q;
551 	__le16 max_rx_bufq;
552 	__le16 max_tx_complq;
553 	__le16 max_sriov_vfs;
554 	__le16 max_vports;
555 	__le16 default_num_vports;
556 	__le16 max_tx_hdr_size;
557 	u8 max_sg_bufs_per_tx_pkt;
558 	u8 pad[3];
559 	u8 reserved[4];
560 	__le32 device_type;
561 	u8 min_sso_packet_len;
562 	u8 max_hdr_buf_per_lso;
563 	__le16 num_rdma_allocated_vectors;
564 	u8 pad1[8];
565 };
566 VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
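
/* A minimal negotiation sketch, assuming the driver only advertises a couple
 * of offloads and lets the CP choose the vector and VF counts (illustrative
 * values only, not a recommended configuration):
 *
 *	struct virtchnl2_get_capabilities caps = {
 *		.csum_caps	= cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
 *					      VIRTCHNL2_CAP_RX_CSUM_L3_IPV4),
 *		.seg_caps	= cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP),
 *		.other_caps	= cpu_to_le64(VIRTCHNL2_CAP_MACFILTER),
 *		.num_allocated_vectors	= cpu_to_le16(0),	// let CP decide
 *		.max_sriov_vfs		= cpu_to_le16(0),	// query max VFs
 *	};
 *
 *	// Send with VIRTCHNL2_OP_GET_CAPS; the CP echoes the structure back
 *	// with the granted subset and fills max_vports, default_num_vports,
 *	// mailbox_vector_id and the remaining response-only fields.
 */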
567 
568 /**
569  * struct virtchnl2_queue_reg_chunk - Single queue chunk.
570  * @type: See enum virtchnl2_queue_type.
571  * @start_queue_id: Start Queue ID.
572  * @num_queues: Number of queues in the chunk.
573  * @pad: Padding.
574  * @qtail_reg_start: Queue tail register offset.
575  * @qtail_reg_spacing: Queue tail register spacing.
576  * @pad1: Padding for future extensions.
577  */
578 struct virtchnl2_queue_reg_chunk {
579 	__le32 type;
580 	__le32 start_queue_id;
581 	__le32 num_queues;
582 	__le32 pad;
583 	__le64 qtail_reg_start;
584 	__le32 qtail_reg_spacing;
585 	u8 pad1[4];
586 };
587 VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
588 
589 /**
590  * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous
591  *				       queues.
592  * @num_chunks: Number of chunks.
593  * @pad: Padding.
594  * @chunks: Chunks of queue info.
595  */
596 struct virtchnl2_queue_reg_chunks {
597 	__le16 num_chunks;
598 	u8 pad[6];
599 	struct virtchnl2_queue_reg_chunk chunks[] __counted_by_le(num_chunks);
600 };
601 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
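
/* @chunks is a flexible array sized by @num_chunks, so the total message
 * length is struct_size() of the header plus the entries. A sketch of walking
 * a reply, assuming qrc points to a received virtchnl2_queue_reg_chunks and
 * that each chunk's tail registers are laid out as start + index * spacing
 * within the function's MMIO space:
 *
 *	u16 i, n = le16_to_cpu(qrc->num_chunks);
 *	size_t len = struct_size(qrc, chunks, n);	// total payload size
 *
 *	for (i = 0; i < n; i++) {
 *		const struct virtchnl2_queue_reg_chunk *c = &qrc->chunks[i];
 *		u32 qid = le32_to_cpu(c->start_queue_id);
 *		u32 num = le32_to_cpu(c->num_queues);
 *
 *		// Queue IDs qid .. qid + num - 1 are contiguous; queue k's
 *		// tail register offset is qtail_reg_start + k * qtail_reg_spacing.
 *	}
 */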
602 
603 /**
604  * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
605  * @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports
606  * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER: Inline flow steering enabled
607  * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ: Inline flow steering enabled
608  *  with explicit Rx queue action
609  * @VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER: Sideband flow steering enabled
610  * @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport
611  */
612 enum virtchnl2_vport_flags {
613 	VIRTCHNL2_VPORT_UPLINK_PORT		= BIT(0),
614 	VIRTCHNL2_VPORT_INLINE_FLOW_STEER	= BIT(1),
615 	VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ	= BIT(2),
616 	VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER	= BIT(3),
617 	VIRTCHNL2_VPORT_ENABLE_RDMA             = BIT(4),
618 };
619 
620 /**
621  * struct virtchnl2_create_vport - Create vport config info.
622  * @vport_type: See enum virtchnl2_vport_type.
623  * @txq_model: See virtchnl2_queue_model.
624  * @rxq_model: See virtchnl2_queue_model.
625  * @num_tx_q: Number of Tx queues.
626  * @num_tx_complq: Valid only if txq_model is split queue.
627  * @num_rx_q: Number of Rx queues.
628  * @num_rx_bufq: Valid only if rxq_model is split queue.
629  * @default_rx_q: Relative receive queue index to be used as default.
630  * @vport_index: Used to align PF and CP in case of default multiple vports,
631  *		 it is filled by the PF and CP returns the same value, to
632  *		 enable the driver to support multiple asynchronous parallel
633  *		 CREATE_VPORT requests and associate a response to a specific
634  *		 request.
635  * @max_mtu: Max MTU. CP populates this field on response.
636  * @vport_id: Vport id. CP populates this field on response.
637  * @default_mac_addr: Default MAC address.
638  * @vport_flags: See enum virtchnl2_vport_flags.
639  * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
640  * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
641  * @pad1: Padding.
642  * @inline_flow_caps: Bit mask of supported inline-flow-steering
643  *  flow types (See enum virtchnl2_flow_types)
644  * @sideband_flow_caps: Bit mask of supported sideband-flow-steering
645  *  flow types (See enum virtchnl2_flow_types)
646  * @sideband_flow_actions: Bit mask of supported action types
647  *  for sideband flow steering (See enum virtchnl2_action_types)
648  * @flow_steer_max_rules: Max rules allowed for inline and sideband
649  *  flow steering combined
650  * @rss_algorithm: RSS algorithm.
651  * @rss_key_size: RSS key size.
652  * @rss_lut_size: RSS LUT size.
653  * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at.
654  * @pad2: Padding.
655  * @chunks: Chunks of contiguous queues.
656  *
657  * PF sends this message to CP to create a vport by filling in required
658  * fields of virtchnl2_create_vport structure.
659  * CP responds with the updated virtchnl2_create_vport structure containing the
660  * necessary fields followed by chunks which in turn will have an array of
661  * num_chunks entries of virtchnl2_queue_chunk structures.
662  *
663  * Associated with VIRTCHNL2_OP_CREATE_VPORT.
664  */
665 struct virtchnl2_create_vport {
666 	__le16 vport_type;
667 	__le16 txq_model;
668 	__le16 rxq_model;
669 	__le16 num_tx_q;
670 	__le16 num_tx_complq;
671 	__le16 num_rx_q;
672 	__le16 num_rx_bufq;
673 	__le16 default_rx_q;
674 	__le16 vport_index;
675 	/* CP populates the following fields on response */
676 	__le16 max_mtu;
677 	__le32 vport_id;
678 	u8 default_mac_addr[ETH_ALEN];
679 	__le16 vport_flags;
680 	__le64 rx_desc_ids;
681 	__le64 tx_desc_ids;
682 	u8 pad1[48];
683 	__le64 inline_flow_caps;
684 	__le64 sideband_flow_caps;
685 	__le32 sideband_flow_actions;
686 	__le32 flow_steer_max_rules;
687 	__le32 rss_algorithm;
688 	__le16 rss_key_size;
689 	__le16 rss_lut_size;
690 	__le32 rx_split_pos;
691 	u8 pad2[20];
692 	struct virtchnl2_queue_reg_chunks chunks;
693 };
694 VIRTCHNL2_CHECK_STRUCT_LEN(160, virtchnl2_create_vport);
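
/* A minimal request sketch for a split-queue vport (illustrative queue counts;
 * the CP fills max_mtu, vport_id, the RSS sizes and the trailing queue chunks
 * in its response):
 *
 *	struct virtchnl2_create_vport *vport;
 *
 *	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
 *	if (!vport)
 *		return -ENOMEM;
 *	vport->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
 *	vport->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
 *	vport->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
 *	vport->num_tx_q = cpu_to_le16(4);
 *	vport->num_tx_complq = cpu_to_le16(4);
 *	vport->num_rx_q = cpu_to_le16(4);
 *	vport->num_rx_bufq = cpu_to_le16(8);
 *	// Send with VIRTCHNL2_OP_CREATE_VPORT and read vport_id, max_mtu and
 *	// the queue register chunks from the CP's reply.
 */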
695 
696 /**
697  * struct virtchnl2_vport - Vport ID info.
698  * @vport_id: Vport id.
699  * @pad: Padding for future extensions.
700  *
701  * PF sends this message to CP to destroy, enable or disable a vport by filling
702  * in the vport_id in virtchnl2_vport structure.
703  * CP responds with the status of the requested operation.
704  *
705  * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT,
706  * VIRTCHNL2_OP_DISABLE_VPORT.
707  */
708 struct virtchnl2_vport {
709 	__le32 vport_id;
710 	u8 pad[4];
711 };
712 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);
713 
714 /**
715  * struct virtchnl2_txq_info - Transmit queue config info
716  * @dma_ring_addr: DMA address.
717  * @type: See enum virtchnl2_queue_type.
718  * @queue_id: Queue ID.
719  * @relative_queue_id: Valid only if queue model is split and type is transmit
720  *		       queue. Used in many-to-one mapping of transmit queues
721  *		       to a completion queue.
722  * @model: See enum virtchnl2_queue_model.
723  * @sched_mode: See enum virtchnl2_txq_sched_mode.
724  * @qflags: TX queue feature flags.
725  * @ring_len: Ring length.
726  * @tx_compl_queue_id: Valid only if queue model is split and type is transmit
727  *		       queue.
728  * @peer_type: Valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MBX_TX.
729  * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver
730  *		      messages for the respective CONFIG_TX queue.
731  * @pad: Padding.
732  * @egress_pasid: Egress PASID info.
733  * @egress_hdr_pasid: Egress header PASID.
734  * @egress_buf_pasid: Egress buffer PASID.
735  * @pad1: Padding for future extensions.
736  */
737 struct virtchnl2_txq_info {
738 	__le64 dma_ring_addr;
739 	__le32 type;
740 	__le32 queue_id;
741 	__le16 relative_queue_id;
742 	__le16 model;
743 	__le16 sched_mode;
744 	__le16 qflags;
745 	__le16 ring_len;
746 	__le16 tx_compl_queue_id;
747 	__le16 peer_type;
748 	__le16 peer_rx_queue_id;
749 	u8 pad[4];
750 	__le32 egress_pasid;
751 	__le32 egress_hdr_pasid;
752 	__le32 egress_buf_pasid;
753 	u8 pad1[8];
754 };
755 VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);
756 
757 /**
758  * struct virtchnl2_config_tx_queues - TX queue config.
759  * @vport_id: Vport id.
760  * @num_qinfo: Number of virtchnl2_txq_info structs.
761  * @pad: Padding.
762  * @qinfo: Tx queues config info.
763  *
764  * PF sends this message to set up parameters for one or more transmit queues.
765  * This message contains an array of num_qinfo instances of virtchnl2_txq_info
766  * structures. CP configures requested queues and returns a status code. If
767  * num_qinfo specified is greater than the number of queues associated with the
768  * vport, an error is returned and no queues are configured.
769  *
770  * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
771  */
772 struct virtchnl2_config_tx_queues {
773 	__le32 vport_id;
774 	__le16 num_qinfo;
775 	u8 pad[10];
776 	struct virtchnl2_txq_info qinfo[] __counted_by_le(num_qinfo);
777 };
778 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
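
/* The message is variable-length, so it is sized with struct_size() (from
 * <linux/overflow.h>). A sketch of building it for num_q transmit queues
 * (error handling abbreviated):
 *
 *	struct virtchnl2_config_tx_queues *ctq;
 *	size_t len = struct_size(ctq, qinfo, num_q);
 *
 *	ctq = kzalloc(len, GFP_KERNEL);
 *	if (!ctq)
 *		return -ENOMEM;
 *	ctq->vport_id = cpu_to_le32(vport_id);
 *	ctq->num_qinfo = cpu_to_le16(num_q);
 *	// Fill ctq->qinfo[0 .. num_q - 1] (ring address, length, model, ...)
 *	// and send len bytes with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
 */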
779 
780 /**
781  * struct virtchnl2_rxq_info - Receive queue config info.
782  * @desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
783  * @dma_ring_addr: DMA address of the receive descriptor ring.
784  * @type: See enum virtchnl2_queue_type.
785  * @queue_id: Queue id.
786  * @model: See enum virtchnl2_queue_model.
787  * @hdr_buffer_size: Header buffer size.
788  * @data_buffer_size: Data buffer size.
789  * @max_pkt_size: Max packet size.
790  * @ring_len: Ring length.
791  * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors.
792  *			 This field must be a power of 2.
793  * @pad: Padding.
794  * @dma_head_wb_addr: Applicable only for receive buffer queues.
795  * @qflags: Applicable only for receive completion queues.
796  *	    See enum virtchnl2_rxq_flags.
797  * @rx_buffer_low_watermark: Rx buffer low watermark.
798  * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with
799  *		 the Rx queue. Valid only in split queue model.
800  * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with
801  *		 the Rx queue. Valid only in split queue model.
802  * @bufq2_ena: It indicates if there is a second buffer, rx_bufq2_id is valid
803  *	       only if this field is set.
804  * @pad1: Padding.
805  * @ingress_pasid: Ingress PASID.
806  * @ingress_hdr_pasid: Ingress header PASID.
807  * @ingress_buf_pasid: Ingress buffer PASID.
808  * @pad2: Padding for future extensions.
809  */
810 struct virtchnl2_rxq_info {
811 	__le64 desc_ids;
812 	__le64 dma_ring_addr;
813 	__le32 type;
814 	__le32 queue_id;
815 	__le16 model;
816 	__le16 hdr_buffer_size;
817 	__le32 data_buffer_size;
818 	__le32 max_pkt_size;
819 	__le16 ring_len;
820 	u8 buffer_notif_stride;
821 	u8 pad;
822 	__le64 dma_head_wb_addr;
823 	__le16 qflags;
824 	__le16 rx_buffer_low_watermark;
825 	__le16 rx_bufq1_id;
826 	__le16 rx_bufq2_id;
827 	u8 bufq2_ena;
828 	u8 pad1[3];
829 	__le32 ingress_pasid;
830 	__le32 ingress_hdr_pasid;
831 	__le32 ingress_buf_pasid;
832 	u8 pad2[16];
833 };
834 VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
835 
836 /**
837  * struct virtchnl2_config_rx_queues - Rx queues config.
838  * @vport_id: Vport id.
839  * @num_qinfo: Number of instances.
840  * @pad: Padding.
841  * @qinfo: Rx queues config info.
842  *
843  * PF sends this message to set up parameters for one or more receive queues.
844  * This message contains an array of num_qinfo instances of virtchnl2_rxq_info
845  * structures. CP configures requested queues and returns a status code.
846  * If the number of queues specified is greater than the number of queues
847  * associated with the vport, an error is returned and no queues are configured.
848  *
849  * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES.
850  */
851 struct virtchnl2_config_rx_queues {
852 	__le32 vport_id;
853 	__le16 num_qinfo;
854 	u8 pad[18];
855 	struct virtchnl2_rxq_info qinfo[] __counted_by_le(num_qinfo);
856 };
857 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
858 
859 /**
860  * struct virtchnl2_add_queues - data for VIRTCHNL2_OP_ADD_QUEUES.
861  * @vport_id: Vport id.
862  * @num_tx_q: Number of Tx queues.
863  * @num_tx_complq: Number of Tx completion queues.
864  * @num_rx_q: Number of Rx queues.
865  * @num_rx_bufq: Number of Rx buffer queues.
866  * @pad: Padding.
867  * @chunks: Chunks of contiguous queues.
868  *
869  * PF sends this message to request additional transmit/receive queues beyond
870  * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
871  * structure is used to specify the number of each type of queues.
872  * CP responds with the same structure with the actual number of queues assigned
873  * followed by num_chunks of virtchnl2_queue_chunk structures.
874  *
875  * Associated with VIRTCHNL2_OP_ADD_QUEUES.
876  */
877 struct virtchnl2_add_queues {
878 	__le32 vport_id;
879 	__le16 num_tx_q;
880 	__le16 num_tx_complq;
881 	__le16 num_rx_q;
882 	__le16 num_rx_bufq;
883 	u8 pad[4];
884 	struct virtchnl2_queue_reg_chunks chunks;
885 };
886 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_add_queues);
887 
888 /**
889  * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous
890  *				   interrupt vectors.
891  * @start_vector_id: Start vector id.
892  * @start_evv_id: Start EVV id.
893  * @num_vectors: Number of vectors.
894  * @pad: Padding.
895  * @dynctl_reg_start: DYN_CTL register offset.
896  * @dynctl_reg_spacing: register spacing between DYN_CTL registers of 2
897  *			consecutive vectors.
898  * @itrn_reg_start: ITRN register offset.
899  * @itrn_reg_spacing: Register spacing between itrn registers of 2
900  *		      consecutive vectors.
901  * @itrn_index_spacing: Register spacing between itrn registers of the same
902  *			vector where n=0..2.
903  * @pad1: Padding for future extensions.
904  *
905  * Register offsets and spacing provided by CP.
906  * Dynamic control registers are used for enabling/disabling/re-enabling
907  * interrupts and updating interrupt rates in the hotpath. Any changes
908  * to interrupt rates in the dynamic control registers will be reflected
909  * in the interrupt throttling rate registers.
910  * itrn registers are used to update interrupt rates for specific
911  * interrupt indices without modifying the state of the interrupt.
912  */
913 struct virtchnl2_vector_chunk {
914 	__le16 start_vector_id;
915 	__le16 start_evv_id;
916 	__le16 num_vectors;
917 	__le16 pad;
918 	__le32 dynctl_reg_start;
919 	__le32 dynctl_reg_spacing;
920 	__le32 itrn_reg_start;
921 	__le32 itrn_reg_spacing;
922 	__le32 itrn_index_spacing;
923 	u8 pad1[4];
924 };
925 VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
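
/* The register layout is strided: for the i-th vector of a chunk, the offsets
 * are derived from the start and spacing values. A sketch (assuming the
 * offsets are relative to the function's MMIO register space):
 *
 *	u32 dynctl_off = le32_to_cpu(chunk->dynctl_reg_start) +
 *			 i * le32_to_cpu(chunk->dynctl_reg_spacing);
 *	u32 itrn_off = le32_to_cpu(chunk->itrn_reg_start) +
 *		       i * le32_to_cpu(chunk->itrn_reg_spacing) +
 *		       n * le32_to_cpu(chunk->itrn_index_spacing);
 *	// n = 0..2 selects the ITR index register within the same vector.
 */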
926 
927 /**
928  * struct virtchnl2_vector_chunks - chunks of contiguous interrupt vectors.
929  * @num_vchunks: number of vector chunks.
930  * @pad: Padding.
931  * @vchunks: Chunks of contiguous vector info.
932  *
933  * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving
934  * away. CP performs requested action and returns status.
935  *
936  * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS.
937  */
938 struct virtchnl2_vector_chunks {
939 	__le16 num_vchunks;
940 	u8 pad[14];
941 	struct virtchnl2_vector_chunk vchunks[] __counted_by_le(num_vchunks);
942 };
943 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);
944 
945 /**
946  * struct virtchnl2_alloc_vectors - vector allocation info.
947  * @num_vectors: Number of vectors.
948  * @pad: Padding.
949  * @vchunks: Chunks of contiguous vector info.
950  *
951  * PF sends this message to request additional interrupt vectors beyond the
952  * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors
953  * structure is used to specify the number of vectors requested. CP responds
954  * with the same structure with the actual number of vectors assigned followed
955  * by virtchnl2_vector_chunks structure identifying the vector ids.
956  *
957  * Associated with VIRTCHNL2_OP_ALLOC_VECTORS.
958  */
959 struct virtchnl2_alloc_vectors {
960 	__le16 num_vectors;
961 	u8 pad[14];
962 	struct virtchnl2_vector_chunks vchunks;
963 };
964 VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_alloc_vectors);
965 
966 /**
967  * struct virtchnl2_rss_lut - RSS LUT info.
968  * @vport_id: Vport id.
969  * @lut_entries_start: Start of LUT entries.
970  * @lut_entries: Number of LUT entries.
971  * @pad: Padding.
972  * @lut: RSS lookup table.
973  *
974  * PF sends this message to get or set RSS lookup table. Only supported if
975  * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
976  * negotiation.
977  *
978  * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT.
979  */
980 struct virtchnl2_rss_lut {
981 	__le32 vport_id;
982 	__le16 lut_entries_start;
983 	__le16 lut_entries;
984 	u8 pad[4];
985 	__le32 lut[] __counted_by_le(lut_entries);
986 };
987 VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
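
/* A sketch of programming an even spread across num_rxq receive queues
 * (illustrative only; lut_entries_start is the index of the first entry
 * carried by this particular message):
 *
 *	struct virtchnl2_rss_lut *rl;
 *	size_t len = struct_size(rl, lut, lut_size);
 *	u16 i;
 *
 *	rl = kzalloc(len, GFP_KERNEL);
 *	if (!rl)
 *		return -ENOMEM;
 *	rl->vport_id = cpu_to_le32(vport_id);
 *	rl->lut_entries_start = cpu_to_le16(0);
 *	rl->lut_entries = cpu_to_le16(lut_size);
 *	for (i = 0; i < lut_size; i++)
 *		rl->lut[i] = cpu_to_le32(i % num_rxq);
 *	// Send len bytes with VIRTCHNL2_OP_SET_RSS_LUT.
 */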
988 
989 /**
990  * struct virtchnl2_rss_hash - RSS hash info.
991  * @ptype_groups: Packet type groups bitmap.
992  * @vport_id: Vport id.
993  * @pad: Padding for future extensions.
994  *
995  * PF sends these messages to get and set the hash filter enable bits for RSS.
996  * By default, the CP sets these to all possible traffic types that the
997  * hardware supports. The PF can query this value if it wants to change the
998  * traffic types that are hashed by the hardware.
999  * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit
1000  * during configuration negotiation.
1001  *
1002  * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH
1003  */
1004 struct virtchnl2_rss_hash {
1005 	__le64 ptype_groups;
1006 	__le32 vport_id;
1007 	u8 pad[4];
1008 };
1009 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);
1010 
1011 /**
1012  * struct virtchnl2_sriov_vfs_info - VFs info.
1013  * @num_vfs: Number of VFs.
1014  * @pad: Padding for future extensions.
1015  *
1016  * This message is used to set number of SRIOV VFs to be created. The actual
1017  * allocation of resources for the VFs in terms of vport, queues and interrupts
1018  * is done by CP. When this call completes, the IDPF driver calls
1019  * pci_enable_sriov to let the OS instantiate the SRIOV PCIE devices.
1020  * Setting the number of VFs to 0 destroys all the VFs of this function.
1021  *
1022  * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS.
1023  */
1024 struct virtchnl2_sriov_vfs_info {
1025 	__le16 num_vfs;
1026 	__le16 pad;
1027 };
1028 VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
1029 
1030 /**
1031  * struct virtchnl2_ptype - Packet type info.
1032  * @ptype_id_10: 10-bit packet type.
1033  * @ptype_id_8: 8-bit packet type.
1034  * @proto_id_count: Number of protocol ids the packet supports, maximum of 32
1035  *		    protocol ids are supported.
1036  * @pad: Padding.
1037  * @proto_id: proto_id_count decides the allocation of protocol id array.
1038  *	      See enum virtchnl2_proto_hdr_type.
1039  *
1040  * Based on the descriptor type the PF supports, CP fills ptype_id_10 or
1041  * ptype_id_8 for the flex and base descriptor respectively. If the
1042  * ptype_id_10 value is set to 0xFFFF, the PF should consider this ptype a
1043  * dummy one and the last ptype.
1044  */
1045 struct virtchnl2_ptype {
1046 	__le16 ptype_id_10;
1047 	u8 ptype_id_8;
1048 	u8 proto_id_count;
1049 	__le16 pad;
1050 	__le16 proto_id[] __counted_by(proto_id_count);
1051 } __packed __aligned(2);
1052 VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
1053 
1054 /**
1055  * struct virtchnl2_get_ptype_info - Packet type info.
1056  * @start_ptype_id: Starting ptype ID.
1057  * @num_ptypes: Number of packet types from start_ptype_id.
1058  * @pad: Padding for future extensions.
1059  *
1060  * The total number of supported packet types is based on the descriptor type.
1061  * For the flex descriptor, it is 1024 (10-bit ptype), and for the base
1062  * descriptor, it is 256 (8-bit ptype). Send this message to the CP by
1063  * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the
1064  * 'start_ptype_id', 'num_ptypes', and the array of ptype (virtchnl2_ptype) that
1065  * are added at the end of the 'virtchnl2_get_ptype_info' message (Note: There
1066  * is no specific field for the ptypes but are added at the end of the
1067  * ptype info message. PF/VF is expected to extract the ptypes accordingly.
1068  * Reason for doing this is because compiler doesn't allow nested flexible
1069  * array fields).
1070  *
1071  * If all the ptypes don't fit into one mailbox buffer, CP splits the
1072  * ptype info into multiple messages, where each message will have its own
1073  * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done
1074  * updating all the ptype information extracted from the package (the number of
1075  * ptypes extracted might be less than what PF/VF expects), it will append a
1076  * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF)
1077  * to the ptype array.
1078  *
1079  * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages.
1080  *
1081  * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO.
1082  */
1083 struct virtchnl2_get_ptype_info {
1084 	__le16 start_ptype_id;
1085 	__le16 num_ptypes;
1086 	__le32 pad;
1087 };
1088 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_ptype_info);
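
/* Because the ptypes follow the fixed header with no dedicated field, a reply
 * has to be parsed by walking the buffer manually. A sketch (msg points to a
 * received VIRTCHNL2_OP_GET_PTYPE_INFO reply; bounds checks abbreviated):
 *
 *	const struct virtchnl2_get_ptype_info *info = msg;
 *	const struct virtchnl2_ptype *ptype;
 *	const u8 *pos = (const u8 *)(info + 1);
 *	u16 i, n = le16_to_cpu(info->num_ptypes);
 *
 *	for (i = 0; i < n; i++) {
 *		ptype = (const struct virtchnl2_ptype *)pos;
 *		if (le16_to_cpu(ptype->ptype_id_10) == 0xFFFF)
 *			break;	// dummy ptype marks the end of the list
 *		// Each entry is the 6-byte header plus proto_id_count ids.
 *		pos += struct_size(ptype, proto_id, ptype->proto_id_count);
 *	}
 */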
1089 
1090 /**
1091  * struct virtchnl2_vport_stats - Vport statistics.
1092  * @vport_id: Vport id.
1093  * @pad: Padding.
1094  * @rx_bytes: Received bytes.
1095  * @rx_unicast: Received unicast packets.
1096  * @rx_multicast: Received multicast packets.
1097  * @rx_broadcast: Received broadcast packets.
1098  * @rx_discards: Discarded packets on receive.
1099  * @rx_errors: Receive errors.
1100  * @rx_unknown_protocol: Unknown protocol.
1101  * @tx_bytes: Transmitted bytes.
1102  * @tx_unicast: Transmitted unicast packets.
1103  * @tx_multicast: Transmitted multicast packets.
1104  * @tx_broadcast: Transmitted broadcast packets.
1105  * @tx_discards: Discarded packets on transmit.
1106  * @tx_errors: Transmit errors.
1107  * @rx_invalid_frame_length: Packets with invalid frame length.
1108  * @rx_overflow_drop: Packets dropped on buffer overflow.
1109  *
1110  * PF/VF sends this message to CP to get the updated stats by specifying the
1111  * vport_id. CP responds with stats in struct virtchnl2_vport_stats.
1112  *
1113  * Associated with VIRTCHNL2_OP_GET_STATS.
1114  */
1115 struct virtchnl2_vport_stats {
1116 	__le32 vport_id;
1117 	u8 pad[4];
1118 	__le64 rx_bytes;
1119 	__le64 rx_unicast;
1120 	__le64 rx_multicast;
1121 	__le64 rx_broadcast;
1122 	__le64 rx_discards;
1123 	__le64 rx_errors;
1124 	__le64 rx_unknown_protocol;
1125 	__le64 tx_bytes;
1126 	__le64 tx_unicast;
1127 	__le64 tx_multicast;
1128 	__le64 tx_broadcast;
1129 	__le64 tx_discards;
1130 	__le64 tx_errors;
1131 	__le64 rx_invalid_frame_length;
1132 	__le64 rx_overflow_drop;
1133 };
1134 VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
1135 
1136 /**
1137  * struct virtchnl2_event - Event info.
1138  * @event: Event opcode. See enum virtchnl2_event_codes.
1139  * @link_speed: Link speed provided in Mbps.
1140  * @vport_id: Vport ID.
1141  * @link_status: Link status.
1142  * @pad: Padding.
1143  * @reserved: Reserved.
1144  *
1145  * CP sends this message to inform the PF/VF driver of events that may affect
1146  * it. No direct response is expected from the driver, though it may generate
1147  * other messages in response to this one.
1148  *
1149  * Associated with VIRTCHNL2_OP_EVENT.
1150  */
1151 struct virtchnl2_event {
1152 	__le32 event;
1153 	__le32 link_speed;
1154 	__le32 vport_id;
1155 	u8 link_status;
1156 	u8 pad;
1157 	__le16 reserved;
1158 };
1159 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
1160 
1161 /**
1162  * struct virtchnl2_rss_key - RSS key info.
1163  * @vport_id: Vport id.
1164  * @key_len: Length of RSS key.
1165  * @pad: Padding.
1166  * @key_flex: RSS hash key, packed bytes.
1167  * PF/VF sends this message to get or set RSS key. Only supported if both
1168  * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
1169  * negotiation.
1170  *
1171  * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
1172  */
1173 struct virtchnl2_rss_key {
1174 	__le32 vport_id;
1175 	__le16 key_len;
1176 	u8 pad;
1177 	u8 key_flex[] __counted_by_le(key_len);
1178 } __packed;
1179 VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
1180 
1181 /**
1182  * struct virtchnl2_queue_chunk - chunk of contiguous queues
1183  * @type: See enum virtchnl2_queue_type.
1184  * @start_queue_id: Starting queue id.
1185  * @num_queues: Number of queues.
1186  * @pad: Padding for future extensions.
1187  */
1188 struct virtchnl2_queue_chunk {
1189 	__le32 type;
1190 	__le32 start_queue_id;
1191 	__le32 num_queues;
1192 	u8 pad[4];
1193 };
1194 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
1195 
1196 /* struct virtchnl2_queue_chunks - chunks of contiguous queues
1197  * @num_chunks: Number of chunks.
1198  * @pad: Padding.
1199  * @chunks: Chunks of contiguous queues info.
1200  */
1201 struct virtchnl2_queue_chunks {
1202 	__le16 num_chunks;
1203 	u8 pad[6];
1204 	struct virtchnl2_queue_chunk chunks[] __counted_by_le(num_chunks);
1205 };
1206 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
1207 
1208 /**
1209  * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info.
1210  * @vport_id: Vport id.
1211  * @pad: Padding.
1212  * @chunks: Chunks of contiguous queues info.
1213  *
1214  * PF sends these messages to enable, disable or delete queues specified in
1215  * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues
1216  * to be enabled/disabled/deleted. Also applicable to single queue receive or
1217  * transmit. CP performs requested action and returns status.
1218  *
1219  * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and
1220  * VIRTCHNL2_OP_DEL_QUEUES.
1221  */
1222 struct virtchnl2_del_ena_dis_queues {
1223 	__le32 vport_id;
1224 	u8 pad[4];
1225 	struct virtchnl2_queue_chunks chunks;
1226 };
1227 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_del_ena_dis_queues);
1228 
1229 /**
1230  * struct virtchnl2_queue_vector - Queue to vector mapping.
1231  * @queue_id: Queue id.
1232  * @vector_id: Vector id.
1233  * @pad: Padding.
1234  * @itr_idx: See enum virtchnl2_itr_idx.
1235  * @queue_type: See enum virtchnl2_queue_type.
1236  * @pad1: Padding for future extensions.
1237  */
1238 struct virtchnl2_queue_vector {
1239 	__le32 queue_id;
1240 	__le16 vector_id;
1241 	u8 pad[2];
1242 	__le32 itr_idx;
1243 	__le32 queue_type;
1244 	u8 pad1[8];
1245 };
1246 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);
1247 
1248 /**
1249  * struct virtchnl2_queue_vector_maps - Map/unmap queues info.
1250  * @vport_id: Vport id.
1251  * @num_qv_maps: Number of queue vector maps.
1252  * @pad: Padding.
1253  * @qv_maps: Queue to vector maps.
1254  *
1255  * PF sends this message to map or unmap queues to vectors and interrupt
1256  * throttling rate index registers. External data buffer contains
1257  * virtchnl2_queue_vector_maps structure that contains num_qv_maps of
1258  * virtchnl2_queue_vector structures. CP maps the requested queue vector maps
1259  * after validating the queue and vector ids and returns a status code.
1260  *
1261  * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and
1262  * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR.
1263  */
1264 struct virtchnl2_queue_vector_maps {
1265 	__le32 vport_id;
1266 	__le16 num_qv_maps;
1267 	u8 pad[10];
1268 	struct virtchnl2_queue_vector qv_maps[] __counted_by_le(num_qv_maps);
1269 };
1270 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
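
/* A sketch of mapping a single Rx queue to a vector at ITR index 0
 * (illustrative only; a real request usually carries one entry per queue):
 *
 *	struct virtchnl2_queue_vector_maps *vm;
 *	size_t len = struct_size(vm, qv_maps, 1);
 *
 *	vm = kzalloc(len, GFP_KERNEL);
 *	if (!vm)
 *		return -ENOMEM;
 *	vm->vport_id = cpu_to_le32(vport_id);
 *	vm->num_qv_maps = cpu_to_le16(1);
 *	vm->qv_maps[0].queue_id = cpu_to_le32(rxq_id);
 *	vm->qv_maps[0].vector_id = cpu_to_le16(vec_id);
 *	vm->qv_maps[0].itr_idx = cpu_to_le32(VIRTCHNL2_ITR_IDX_0);
 *	vm->qv_maps[0].queue_type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
 *	// Send len bytes with VIRTCHNL2_OP_MAP_QUEUE_VECTOR.
 */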
1271 
1272 /**
1273  * struct virtchnl2_loopback - Loopback info.
1274  * @vport_id: Vport id.
1275  * @enable: Enable/disable.
1276  * @pad: Padding for future extensions.
1277  *
1278  * PF/VF sends this message to transition to/from the loopback state. Setting
1279  * the 'enable' to 1 enables the loopback state and setting 'enable' to 0
1280  * disables it. CP configures the state to loopback and returns status.
1281  *
1282  * Associated with VIRTCHNL2_OP_LOOPBACK.
1283  */
1284 struct virtchnl2_loopback {
1285 	__le32 vport_id;
1286 	u8 enable;
1287 	u8 pad[3];
1288 };
1289 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);
1290 
1291 /* struct virtchnl2_mac_addr - MAC address info.
1292  * @addr: MAC address.
1293  * @type: MAC type. See enum virtchnl2_mac_addr_type.
1294  * @pad: Padding for future extensions.
1295  */
1296 struct virtchnl2_mac_addr {
1297 	u8 addr[ETH_ALEN];
1298 	u8 type;
1299 	u8 pad;
1300 };
1301 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);
1302 
1303 /**
1304  * struct virtchnl2_mac_addr_list - List of MAC addresses.
1305  * @vport_id: Vport id.
1306  * @num_mac_addr: Number of MAC addresses.
1307  * @pad: Padding.
1308  * @mac_addr_list: List with MAC address info.
1309  *
1310  * PF/VF driver uses this structure to send the list of MAC addresses to be
1311  * added/deleted to the CP. The CP performs the action and returns the
1312  * status.
1313  *
1314  * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR.
1315  */
1316 struct virtchnl2_mac_addr_list {
1317 	__le32 vport_id;
1318 	__le16 num_mac_addr;
1319 	u8 pad[2];
1320 	struct virtchnl2_mac_addr mac_addr_list[] __counted_by_le(num_mac_addr);
1321 };
1322 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
1323 
1324 /**
1325  * struct virtchnl2_promisc_info - Promisc type info.
1326  * @vport_id: Vport id.
1327  * @flags: See enum virtchnl2_promisc_flags.
1328  * @pad: Padding for future extensions.
1329  *
1330  * PF/VF sends the vport id and flags to the CP. The CP performs the action
1331  * and returns the status.
1332  *
1333  * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE.
1334  */
1335 struct virtchnl2_promisc_info {
1336 	__le32 vport_id;
1337 	/* See enum virtchnl2_promisc_flags */
1338 	__le16 flags;
1339 	u8 pad[2];
1340 };
1341 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
1342 
1343 /**
1344  * enum virtchnl2_ptp_caps - PTP capabilities
1345  * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of
1346  *					   device clock
1347  * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of
1348  *					      device clock
1349  * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp
1350  * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp
1351  * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of
1352  *					   device clock
1353  * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of
1354  *					      device clock
1355  * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device
1356  *				      clock
1357  * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of
1358  *					 device clock
1359  * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping
1360  * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping
1361  *
1362  * PF/VF negotiates a set of supported PTP capabilities with the Control Plane.
1363  * There are two access methods - mailbox (_MB) and direct.
1364  * The PTP capabilities enable Main Timer operations (get/set/adjust the Main
1365  * Timer), cross timestamping and Tx timestamping.
1366  */
1367 enum virtchnl2_ptp_caps {
1368 	VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME		= BIT(0),
1369 	VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB	= BIT(1),
1370 	VIRTCHNL2_CAP_PTP_GET_CROSS_TIME		= BIT(2),
1371 	VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB		= BIT(3),
1372 	VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME		= BIT(4),
1373 	VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB	= BIT(5),
1374 	VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK		= BIT(6),
1375 	VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB		= BIT(7),
1376 	VIRTCHNL2_CAP_PTP_TX_TSTAMPS			= BIT(8),
1377 	VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB			= BIT(9),
1378 };
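
/* Each operation can be granted as direct register access, mailbox access or
 * neither. A sketch of interpreting the negotiated bitmap (caps holds the
 * CPU-endian value of virtchnl2_ptp_get_caps::caps; the adapter fields named
 * below are hypothetical):
 *
 *	if (caps & VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME)
 *		adapter->ptp_direct_clk_read = true;	// use clk_offsets regs
 *	else if (caps & VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB)
 *		adapter->ptp_mbx_clk_read = true;	// use VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME
 */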
1379 
1380 /**
1381  * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clocks
1382  *					  registers.
1383  * @dev_clk_ns_l: Device clock low register offset
1384  * @dev_clk_ns_h: Device clock high register offset
1385  * @phy_clk_ns_l: PHY clock low register offset
1386  * @phy_clk_ns_h: PHY clock high register offset
1387  * @cmd_sync_trigger: The command sync trigger register offset
1388  * @pad: Padding for future extensions
1389  */
1390 struct virtchnl2_ptp_clk_reg_offsets {
1391 	__le32 dev_clk_ns_l;
1392 	__le32 dev_clk_ns_h;
1393 	__le32 phy_clk_ns_l;
1394 	__le32 phy_clk_ns_h;
1395 	__le32 cmd_sync_trigger;
1396 	u8 pad[4];
1397 };
1398 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets);
1399 
1400 /**
1401  * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross
1402  *						 time registers.
1403  * @sys_time_ns_l: System time low register offset
1404  * @sys_time_ns_h: System time high register offset
1405  * @cmd_sync_trigger: The command sync trigger register offset
1406  * @pad: Padding for future extensions
1407  */
1408 struct virtchnl2_ptp_cross_time_reg_offsets {
1409 	__le32 sys_time_ns_l;
1410 	__le32 sys_time_ns_h;
1411 	__le32 cmd_sync_trigger;
1412 	u8 pad[4];
1413 };
1414 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets);
1415 
1416 /**
1417  * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clocks
1418  *					      adjustments registers.
1419  * @dev_clk_cmd_type: Device clock command type register offset
1420  * @dev_clk_incval_l: Device clock increment value low register offset
1421  * @dev_clk_incval_h: Device clock increment value high registers offset
1422  * @dev_clk_shadj_l: Device clock shadow adjust low register offset
1423  * @dev_clk_shadj_h: Device clock shadow adjust high register offset
1424  * @phy_clk_cmd_type: PHY timer command type register offset
1425  * @phy_clk_incval_l: PHY timer increment value low register offset
1426  * @phy_clk_incval_h: PHY timer increment value high register offset
1427  * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
1428  * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
1429  */
1430 struct virtchnl2_ptp_clk_adj_reg_offsets {
1431 	__le32 dev_clk_cmd_type;
1432 	__le32 dev_clk_incval_l;
1433 	__le32 dev_clk_incval_h;
1434 	__le32 dev_clk_shadj_l;
1435 	__le32 dev_clk_shadj_h;
1436 	__le32 phy_clk_cmd_type;
1437 	__le32 phy_clk_incval_l;
1438 	__le32 phy_clk_incval_h;
1439 	__le32 phy_clk_shadj_l;
1440 	__le32 phy_clk_shadj_h;
1441 };
1442 VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
1443 
1444 /**
1445  * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
1446  *					       capabilities.
1447  * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
1448  * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
1449  * @index: Latch index provided to the Tx descriptor
1450  * @pad: Padding for future extensions
1451  */
1452 struct virtchnl2_ptp_tx_tstamp_latch_caps {
1453 	__le32 tx_latch_reg_offset_l;
1454 	__le32 tx_latch_reg_offset_h;
1455 	u8 index;
1456 	u8 pad[7];
1457 };
1458 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
1459 
1460 /**
1461  * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
1462  *						   tstamp entries.
1463  * @vport_id: Vport number
1464  * @num_latches: Total number of latches
1465  * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
1466  * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
1467  * @pad: Padding for future tstamp granularity extensions
1468  * @tstamp_latches: Capabilities of Tx timestamp entries
1469  *
1470  * PF/VF sends this message to negotiate the Tx timestamp latches for each
1471  * Vport.
1472  *
1473  * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
1474  */
1475 struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
1476 	__le32 vport_id;
1477 	__le16 num_latches;
1478 	u8 tstamp_ns_lo_bit;
1479 	u8 tstamp_ns_hi_bit;
1480 	u8 pad[8];
1481 
1482 	struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
1483 						  __counted_by_le(num_latches);
1484 };
1485 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
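
/* Example (illustrative sketch, not part of the virtchnl2 ABI): the reply
 * carries num_latches trailing entries, so a receive buffer for
 * VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS may be sized with struct_size()
 * from <linux/overflow.h>; max_latches is a hypothetical driver-chosen bound:
 *
 *	struct virtchnl2_ptp_get_vport_tx_tstamp_caps *caps;
 *	size_t len = struct_size(caps, tstamp_latches, max_latches);
 *
 *	caps = kzalloc(len, GFP_KERNEL);
 */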
1486 
1487 /**
1488  * struct virtchnl2_ptp_get_caps - Get PTP capabilities
1489  * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
1490  * @max_adj: The maximum possible frequency adjustment
1491  * @base_incval: The default timer increment value
1492  * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
1493  * @peer_id: Peer ID for PTP Device Control daemon
1494  * @secondary_mbx: Indicates to the driver that it should create a secondary
1495  *		   mailbox to interact with the control plane for PTP
1496  * @pad: Padding for future extensions
1497  * @clk_offsets: Main timer and PHY registers offsets
1498  * @cross_time_offsets: Cross time registers offsets
1499  * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
1500  *
1501  * PF/VF sends this message to negotiate PTP capabilities. CP updates the
1502  * bitmap with the supported features and fills in the appropriate structures.
1503  * If HW uses the primary mailbox for PTP: secondary_mbx is set to false.
1504  * If HW uses a secondary mailbox for PTP: secondary_mbx is set to true.
1505  *	The control plane has two mailboxes while the driver has one, so the
1506  *	"send to peer driver" path may be used to send messages using the
1507  *	valid peer_mbx_q_id and peer_id.
1508  * If HW does not use "send to peer driver": secondary_mbx is a don't-care
1509  * field and peer_mbx_q_id holds the invalid value (0xFFFF).
1510  *
1511  * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
1512  */
1513 struct virtchnl2_ptp_get_caps {
1514 	__le32 caps;
1515 	__le32 max_adj;
1516 	__le64 base_incval;
1517 	__le16 peer_mbx_q_id;
1518 	u8 peer_id;
1519 	u8 secondary_mbx;
1520 	u8 pad[4];
1521 
1522 	struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
1523 	struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
1524 	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
1525 };
1526 VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
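
/* Example (illustrative sketch, not part of the virtchnl2 ABI): the requester
 * sets every capability bit it can support and the CP masks the result, e.g.:
 *
 *	struct virtchnl2_ptp_get_caps req = {
 *		.caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
 *				    VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
 *				    VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB),
 *	};
 *
 * On reply, if secondary_mbx is true the driver creates the secondary PTP
 * mailbox addressed by peer_mbx_q_id/peer_id; a peer_mbx_q_id of 0xFFFF means
 * the peer path is not used.
 */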
1527 
1528 /**
1529  * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes Tx tstamp
1530  *					  values, index and validity.
1531  * @tstamp: Timestamp value
1532  * @index: Timestamp index from which the value is read
1533  * @valid: Timestamp validity
1534  * @pad: Padding for future extensions
1535  */
1536 struct virtchnl2_ptp_tx_tstamp_latch {
1537 	__le64 tstamp;
1538 	u8 index;
1539 	u8 valid;
1540 	u8 pad[6];
1541 };
1542 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
1543 
1544 /**
1545  * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
1546  *						      associated with the vport.
1547  * @vport_id: Vport number that requests the timestamps
1548  * @num_latches: Number of latches
1549  * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
1550  * @pad: Padding for future extensions
1551  * @device_time: Device time if get_devtime_with_txtstmp was set in the request
1552  * @tstamp_latches: PTP Tx timestamp latches
1553  *
1554  * PF/VF sends this message to receive a specified number of timestamp
1555  * entries.
1556  *
1557  * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
1558  */
1559 struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
1560 	__le32 vport_id;
1561 	__le16 num_latches;
1562 	u8 get_devtime_with_txtstmp;
1563 	u8 pad[1];
1564 	__le64 device_time;
1565 
1566 	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
1567 					     __counted_by_le(num_latches);
1568 };
1569 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
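
/* Example (illustrative sketch, not part of the virtchnl2 ABI): the reply is
 * parsed by walking the valid latches and extracting the nanosecond field
 * using the bit range negotiated in
 * struct virtchnl2_ptp_get_vport_tx_tstamp_caps. GENMASK_ULL() is from
 * <linux/bits.h>; lo_bit/hi_bit come from that earlier reply:
 *
 *	u16 i;
 *
 *	for (i = 0; i < le16_to_cpu(resp->num_latches); i++) {
 *		struct virtchnl2_ptp_tx_tstamp_latch *latch;
 *		u64 raw, ns;
 *
 *		latch = &resp->tstamp_latches[i];
 *		if (!latch->valid)
 *			continue;
 *		raw = le64_to_cpu(latch->tstamp);
 *		ns = (raw & GENMASK_ULL(hi_bit, lo_bit)) >> lo_bit;
 *	}
 */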
1570 
1571 /**
1572  * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
1573  *					   VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
1574  * @dev_time_ns: Device clock time value in nanoseconds
1575  *
1576  * PF/VF sends this message to receive the time from the main timer.
1577  */
1578 struct virtchnl2_ptp_get_dev_clk_time {
1579 	__le64 dev_time_ns;
1580 };
1581 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
1582 
1583 /**
1584  * struct virtchnl2_ptp_get_cross_time - Associated with message
1585  *					VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
1586  * @sys_time_ns: System counter value expressed in nanoseconds, read
1587  *		 synchronously with device time
1588  * @dev_time_ns: Device clock time value expressed in nanoseconds
1589  *
1590  * PF/VF sends this message to receive the cross time.
1591  */
1592 struct virtchnl2_ptp_get_cross_time {
1593 	__le64 sys_time_ns;
1594 	__le64 dev_time_ns;
1595 };
1596 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
1597 
1598 /**
1599  * struct virtchnl2_ptp_set_dev_clk_time - Associated with message
1600  *					  VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
1601  * @dev_time_ns: Device time value expressed in nanoseconds to set
1602  *
1603  * PF/VF sends this message to set the time of the main timer.
1604  */
1605 struct virtchnl2_ptp_set_dev_clk_time {
1606 	__le64 dev_time_ns;
1607 };
1608 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
1609 
1610 /**
1611  * struct virtchnl2_ptp_adj_dev_clk_fine - Associated with message
1612  *					  VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
1613  * @incval: Source timer increment value per clock cycle
1614  *
1615  * PF/VF sends this message to adjust the frequency of the main timer by the
1616  * indicated increment value.
1617  */
1618 struct virtchnl2_ptp_adj_dev_clk_fine {
1619 	__le64 incval;
1620 };
1621 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine);
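
/* Example (illustrative sketch, not part of the virtchnl2 ABI): for a PHC
 * .adjfine callback the new increment value is conventionally derived from
 * the negotiated base_incval, e.g. with adjust_by_scaled_ppm() from
 * <linux/ptp_clock_kernel.h>:
 *
 *	struct virtchnl2_ptp_adj_dev_clk_fine msg = {
 *		.incval = cpu_to_le64(adjust_by_scaled_ppm(base_incval,
 *							   scaled_ppm)),
 *	};
 *
 * base_incval is the value reported in struct virtchnl2_ptp_get_caps and
 * scaled_ppm is the argument passed in by the PTP core.
 */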
1622 
1623 /**
1624  * struct virtchnl2_ptp_adj_dev_clk_time - Associated with message
1625  *					  VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME.
1626  * @delta: Offset in nanoseconds to adjust the time by
1627  *
1628  * PF/VF sends this message to adjust the time of the main timer by the delta.
1629  */
1630 struct virtchnl2_ptp_adj_dev_clk_time {
1631 	__le64 delta;
1632 };
1633 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
1634 
1635 /**
1636  * struct virtchnl2_mem_region - MMIO memory region
1637  * @start_offset: starting offset of the MMIO memory region
1638  * @size: size of the MMIO memory region
1639  */
1640 struct virtchnl2_mem_region {
1641 	__le64 start_offset;
1642 	__le64 size;
1643 };
1644 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region);
1645 
1646 /**
1647  * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions
1648  * @num_memory_regions: number of memory regions
1649  * @pad: Padding
1650  * @mem_reg: List with memory region info
1651  *
1652  * PF/VF sends this message to learn what LAN MMIO memory regions it should map.
1653  */
1654 struct virtchnl2_get_lan_memory_regions {
1655 	__le16 num_memory_regions;
1656 	u8 pad[6];
1657 	struct virtchnl2_mem_region mem_reg[];
1658 };
1659 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions);
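
/* Example (illustrative sketch, not part of the virtchnl2 ABI): a driver
 * could walk the reply and map each region. bar_pa (the physical BAR address
 * the offsets are assumed to be relative to), vaddr[] and i are hypothetical:
 *
 *	for (i = 0; i < le16_to_cpu(resp->num_memory_regions); i++) {
 *		u64 off = le64_to_cpu(resp->mem_reg[i].start_offset);
 *		u64 size = le64_to_cpu(resp->mem_reg[i].size);
 *
 *		vaddr[i] = ioremap(bar_pa + off, size);
 *	}
 */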
1660 
1661 #define VIRTCHNL2_MAX_NUM_PROTO_HDRS	4
1662 #define VIRTCHNL2_MAX_SIZE_RAW_PACKET	256
1663 #define VIRTCHNL2_MAX_NUM_ACTIONS	8
1664 
1665 /**
1666  * struct virtchnl2_proto_hdr - represent one protocol header
1667  * @hdr_type: See enum virtchnl2_proto_hdr_type
1668  * @pad: padding
1669  * @buffer_spec: binary buffer based on header type.
1670  * @buffer_mask: mask applied on buffer_spec.
1671  *
1672  * Structure to hold protocol headers based on hdr_type
1673  */
1674 struct virtchnl2_proto_hdr {
1675 	__le32 hdr_type;
1676 	u8 pad[4];
1677 	u8 buffer_spec[64];
1678 	u8 buffer_mask[64];
1679 };
1680 VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_proto_hdr);
1681 
1682 /**
1683  * struct virtchnl2_proto_hdrs - struct to represent match criteria
1684  * @tunnel_level: specify where protocol header(s) start from.
1685  *                 Must be 0 when sending a raw packet request.
1686  *                 0 - from the outer layer
1687  *                 1 - from the first inner layer
1688  *                 2 - from the second inner layer
1689  * @pad: Padding bytes
1690  * @count: total number of protocol headers in proto_hdr. 0 for raw packet.
1691  * @proto_hdr: Array of protocol headers
1692  * @raw: struct holding raw packet buffer when count is 0
1693  */
1694 struct virtchnl2_proto_hdrs {
1695 	u8 tunnel_level;
1696 	u8 pad[3];
1697 	__le32 count;
1698 	union {
1699 		struct virtchnl2_proto_hdr
1700 			proto_hdr[VIRTCHNL2_MAX_NUM_PROTO_HDRS];
1701 		struct {
1702 			__le16 pkt_len;
1703 			u8 spec[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
1704 			u8 mask[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
1705 		} raw;
1706 	};
1707 };
1708 VIRTCHNL2_CHECK_STRUCT_LEN(552, virtchnl2_proto_hdrs);
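
/* Example (illustrative sketch, not part of the virtchnl2 ABI): a raw-packet
 * match is indicated by count == 0 with tunnel_level == 0; the packet bytes
 * and a per-byte mask go into the raw member. pkt, mask and pkt_len are
 * hypothetical buffers no longer than VIRTCHNL2_MAX_SIZE_RAW_PACKET bytes:
 *
 *	struct virtchnl2_proto_hdrs hdrs = { };
 *
 *	hdrs.tunnel_level = 0;
 *	hdrs.count = cpu_to_le32(0);
 *	hdrs.raw.pkt_len = cpu_to_le16(pkt_len);
 *	memcpy(hdrs.raw.spec, pkt, pkt_len);
 *	memcpy(hdrs.raw.mask, mask, pkt_len);
 */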
1709 
1710 /**
1711  * struct virtchnl2_rule_action - struct representing single action for a flow
1712  * @action_type: see enum virtchnl2_action_types
1713  * @act_conf: union representing action depending on action_type.
1714  * @act_conf.q_id: queue id to redirect the packets to.
1715  * @act_conf.q_grp_id: queue group id to redirect the packets to.
1716  * @act_conf.ctr_id: Used for the count action. If the input value is
1717  *                    0xFFFFFFFF, the control plane assigns a new counter and
1718  *                    returns the counter ID to the driver. Otherwise it must
1719  *                    be the ID of an existing counter given to the driver for
1720  *                    an earlier flow; this flow then shares that counter.
1721  * @act_conf.mark_id: Value used to mark the packets. Used for mark action.
1722  * @act_conf.reserved: Reserved for future use.
1723  */
1724 struct virtchnl2_rule_action {
1725 	__le32 action_type;
1726 	union {
1727 		__le32 q_id;
1728 		__le32 q_grp_id;
1729 		__le32 ctr_id;
1730 		__le32 mark_id;
1731 		u8 reserved[8];
1732 	} act_conf;
1733 };
1734 VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rule_action);
1735 
1736 /**
1737  * struct virtchnl2_rule_action_set - struct representing multiple actions
1738  * @count: number of valid actions in the action set of a rule
1739  * @actions: array of struct virtchnl2_rule_action
1740  */
1741 struct virtchnl2_rule_action_set {
1742 	/* action count must be less than VIRTCHNL2_MAX_NUM_ACTIONS */
1743 	__le32 count;
1744 	struct virtchnl2_rule_action actions[VIRTCHNL2_MAX_NUM_ACTIONS];
1745 };
1746 VIRTCHNL2_CHECK_STRUCT_LEN(100, virtchnl2_rule_action_set);
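
/* Example (illustrative sketch, not part of the virtchnl2 ABI): an action set
 * that redirects matching packets to an Rx queue and requests a new counter
 * could be built as below; queue_act and count_act stand in for values from
 * enum virtchnl2_action_types and rx_q_id is a hypothetical queue ID:
 *
 *	struct virtchnl2_rule_action_set set = {
 *		.count = cpu_to_le32(2),
 *		.actions = {
 *			{
 *				.action_type = cpu_to_le32(queue_act),
 *				.act_conf.q_id = cpu_to_le32(rx_q_id),
 *			},
 *			{
 *				.action_type = cpu_to_le32(count_act),
 *				.act_conf.ctr_id = cpu_to_le32(0xFFFFFFFF),
 *			},
 *		},
 *	};
 */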
1747 
1748 /**
1749  * struct virtchnl2_flow_rule - represent one flow steering rule
1750  * @proto_hdrs: array of protocol header buffers representing match criteria
1751  * @action_set: series of actions to be applied for given rule
1752  * @priority: rule priority.
1753  * @pad: padding for future extensions.
1754  */
1755 struct virtchnl2_flow_rule {
1756 	struct virtchnl2_proto_hdrs proto_hdrs;
1757 	struct virtchnl2_rule_action_set action_set;
1758 	__le32 priority;
1759 	u8 pad[8];
1760 };
1761 VIRTCHNL2_CHECK_STRUCT_LEN(664, virtchnl2_flow_rule);
1762 
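/* Per-rule programming status reported by the CP in the status field of
 * struct virtchnl2_flow_rule_info.
 */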
1763 enum virtchnl2_flow_rule_status {
1764 	VIRTCHNL2_FLOW_RULE_SUCCESS			= 1,
1765 	VIRTCHNL2_FLOW_RULE_NORESOURCE			= 2,
1766 	VIRTCHNL2_FLOW_RULE_EXIST			= 3,
1767 	VIRTCHNL2_FLOW_RULE_TIMEOUT			= 4,
1768 	VIRTCHNL2_FLOW_RULE_FLOW_TYPE_NOT_SUPPORTED	= 5,
1769 	VIRTCHNL2_FLOW_RULE_MATCH_KEY_NOT_SUPPORTED	= 6,
1770 	VIRTCHNL2_FLOW_RULE_ACTION_NOT_SUPPORTED	= 7,
1771 	VIRTCHNL2_FLOW_RULE_ACTION_COMBINATION_INVALID	= 8,
1772 	VIRTCHNL2_FLOW_RULE_ACTION_DATA_INVALID		= 9,
1773 	VIRTCHNL2_FLOW_RULE_NOT_ADDED			= 10,
1774 };
1775 
1776 /**
1777  * struct virtchnl2_flow_rule_info: structure representing single flow rule
1778  * @rule_id: rule_id associated with the flow_rule.
1779  * @rule_cfg: structure representing rule.
1780  * @status: status of rule programming. See enum virtchnl2_flow_rule_status.
1781  */
1782 struct virtchnl2_flow_rule_info {
1783 	__le32 rule_id;
1784 	struct virtchnl2_flow_rule rule_cfg;
1785 	__le32 status;
1786 };
1787 VIRTCHNL2_CHECK_STRUCT_LEN(672, virtchnl2_flow_rule_info);
1788 
1789 /**
1790  * struct virtchnl2_flow_rule_add_del - add/delete a flow steering rule
1791  * @vport_id: vport id for which the rule is to be added or deleted.
1792  * @count: Indicates number of rules to be added or deleted.
1793  * @rule_info: Array of flow rules to be added or deleted.
1794  *
1795  * For VIRTCHNL2_OP_ADD_FLOW_RULE, rule_info contains a list of rules to be
1796  * added. If rule_id is 0xFFFFFFFF, then the rule is programmed and not cached.
1797  *
1798  * For VIRTCHNL2_OP_DEL_FLOW_RULE, there are two possibilities. The structure
1799  * can contain either array of rule_ids or array of match keys to be deleted.
1800  * When match keys are used the corresponding rule_ids must be 0xFFFFFFFF.
1801  *
1802  * The status member of each rule indicates the result. A maximum of 6 rules
1803  * can be added or deleted per message. The driver has to retry if the ADD or
1804  * DEL opcode fails; the CP does not retry on failure.
1805  */
1806 struct virtchnl2_flow_rule_add_del {
1807 	__le32 vport_id;
1808 	__le32 count;
1809 	struct virtchnl2_flow_rule_info rule_info[] __counted_by_le(count);
1810 };
1811 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_flow_rule_add_del);
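
/* Example (illustrative sketch, not part of the virtchnl2 ABI): an ADD
 * request carrying a single rule could be allocated and filled as below
 * (struct_size() is from <linux/overflow.h>); rule is a hypothetical
 * pre-built struct virtchnl2_flow_rule:
 *
 *	struct virtchnl2_flow_rule_add_del *msg;
 *	size_t len = struct_size(msg, rule_info, 1);
 *
 *	msg = kzalloc(len, GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *	msg->vport_id = cpu_to_le32(vport_id);
 *	msg->count = cpu_to_le32(1);
 *	msg->rule_info[0].rule_id = cpu_to_le32(0xFFFFFFFF);
 *	msg->rule_info[0].rule_cfg = *rule;
 *
 * On completion the driver checks rule_info[0].status against
 * enum virtchnl2_flow_rule_status and retries on failure, as noted above.
 */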
1812 
1813 #endif /* _VIRTCHNL2_H_ */
1814