xref: /linux/drivers/net/ethernet/intel/idpf/virtchnl2.h (revision af2d6148d2a159e1a0862bce5a2c88c1618a2b27)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #ifndef _VIRTCHNL2_H_
5 #define _VIRTCHNL2_H_
6 
7 #include <linux/if_ether.h>
8 
9 /* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
10  * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
11  * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
12  *
13  * PF/VF uses the virtchnl2 interface defined in this header file to communicate
14  * with device Control Plane (CP). Driver and the CP may run on different
15  * platforms with different endianness. To avoid byte order discrepancies,
16  * all the structures in this header follow little-endian format.
17  *
18  * This is an interface definition file where existing enums and their values
19  * must remain unchanged over time, so we specify explicit values for all enums.
20  */
21 
/* This macro is used to generate compilation errors if a structure
 * is not exactly the correct length. It pins every wire-format struct
 * in this file to its fixed ABI size (n = expected size in bytes).
 */
#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X)	\
	static_assert((n) == sizeof(struct X))
27 
/* New major set of opcodes introduced and so leaving room for
 * old misc opcodes to be added in future. Also these opcodes may only
 * be used if both the PF and VF have successfully negotiated the
 * VIRTCHNL version as 2.0 during VIRTCHNL2_OP_VERSION exchange.
 *
 * Opcode values are part of the wire ABI: never renumber or reuse them;
 * gaps are reserved as noted below.
 */
enum virtchnl2_op {
	VIRTCHNL2_OP_UNKNOWN			= 0,
	VIRTCHNL2_OP_VERSION			= 1,
	VIRTCHNL2_OP_GET_CAPS			= 500,
	VIRTCHNL2_OP_CREATE_VPORT		= 501,
	VIRTCHNL2_OP_DESTROY_VPORT		= 502,
	VIRTCHNL2_OP_ENABLE_VPORT		= 503,
	VIRTCHNL2_OP_DISABLE_VPORT		= 504,
	VIRTCHNL2_OP_CONFIG_TX_QUEUES		= 505,
	VIRTCHNL2_OP_CONFIG_RX_QUEUES		= 506,
	VIRTCHNL2_OP_ENABLE_QUEUES		= 507,
	VIRTCHNL2_OP_DISABLE_QUEUES		= 508,
	VIRTCHNL2_OP_ADD_QUEUES			= 509,
	VIRTCHNL2_OP_DEL_QUEUES			= 510,
	VIRTCHNL2_OP_MAP_QUEUE_VECTOR		= 511,
	VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR		= 512,
	VIRTCHNL2_OP_GET_RSS_KEY		= 513,
	VIRTCHNL2_OP_SET_RSS_KEY		= 514,
	VIRTCHNL2_OP_GET_RSS_LUT		= 515,
	VIRTCHNL2_OP_SET_RSS_LUT		= 516,
	VIRTCHNL2_OP_GET_RSS_HASH		= 517,
	VIRTCHNL2_OP_SET_RSS_HASH		= 518,
	VIRTCHNL2_OP_SET_SRIOV_VFS		= 519,
	VIRTCHNL2_OP_ALLOC_VECTORS		= 520,
	VIRTCHNL2_OP_DEALLOC_VECTORS		= 521,
	VIRTCHNL2_OP_EVENT			= 522,
	VIRTCHNL2_OP_GET_STATS			= 523,
	VIRTCHNL2_OP_RESET_VF			= 524,
	VIRTCHNL2_OP_GET_EDT_CAPS		= 525,
	VIRTCHNL2_OP_GET_PTYPE_INFO		= 526,
	/* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
	 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
	 */
	VIRTCHNL2_OP_RDMA			= 529,
	/* Opcodes 530 through 533 are reserved. */
	VIRTCHNL2_OP_LOOPBACK			= 534,
	VIRTCHNL2_OP_ADD_MAC_ADDR		= 535,
	VIRTCHNL2_OP_DEL_MAC_ADDR		= 536,
	VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE	= 537,

	/* TimeSync opcodes */
	VIRTCHNL2_OP_PTP_GET_CAPS			= 541,
	VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP		= 542,
	VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME		= 543,
	VIRTCHNL2_OP_PTP_GET_CROSS_TIME			= 544,
	VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME		= 545,
	VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE		= 546,
	VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME		= 547,
	VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS	= 548,
	VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS		= 549,
};
84 
/**
 * enum virtchnl2_vport_type - Type of virtual port.
 * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type.
 *
 * Used in the vport_type field of struct virtchnl2_create_vport.
 */
enum virtchnl2_vport_type {
	VIRTCHNL2_VPORT_TYPE_DEFAULT		= 0,
};
92 
/**
 * enum virtchnl2_queue_model - Type of queue model.
 * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model.
 * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model.
 *
 * In the single queue model, the same transmit descriptor queue is used by
 * software to post descriptors to hardware and by hardware to post completed
 * descriptors to software.
 * Likewise, the same receive descriptor queue is used by hardware to post
 * completions to software and by software to post buffers to hardware.
 *
 * In the split queue model, hardware uses transmit completion queues to post
 * descriptor/buffer completions to software, while software uses transmit
 * descriptor queues to post descriptors to hardware.
 * Likewise, hardware posts descriptor completions to the receive descriptor
 * queue, while software uses receive buffer queues to post buffers to hardware.
 *
 * Used in the txq_model/rxq_model fields of struct virtchnl2_create_vport.
 */
enum virtchnl2_queue_model {
	VIRTCHNL2_QUEUE_MODEL_SINGLE		= 0,
	VIRTCHNL2_QUEUE_MODEL_SPLIT		= 1,
};
114 
/* Checksum offload capability flags.
 * Bit values for the csum_caps field of struct virtchnl2_get_capabilities.
 */
enum virtchnl2_cap_txrx_csum {
	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4		= BIT(0),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	= BIT(1),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	= BIT(2),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	= BIT(3),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	= BIT(4),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	= BIT(5),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	= BIT(6),
	VIRTCHNL2_CAP_TX_CSUM_GENERIC		= BIT(7),
	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		= BIT(8),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	= BIT(9),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	= BIT(10),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	= BIT(11),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	= BIT(12),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	= BIT(13),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	= BIT(14),
	VIRTCHNL2_CAP_RX_CSUM_GENERIC		= BIT(15),
	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	= BIT(16),
	VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL	= BIT(17),
	VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL	= BIT(18),
	VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL	= BIT(19),
	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL	= BIT(20),
	VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL	= BIT(21),
	VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL	= BIT(22),
	VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL	= BIT(23),
};
142 
/* Segmentation offload capability flags.
 * Bit values for the seg_caps field of struct virtchnl2_get_capabilities.
 */
enum virtchnl2_cap_seg {
	VIRTCHNL2_CAP_SEG_IPV4_TCP		= BIT(0),
	VIRTCHNL2_CAP_SEG_IPV4_UDP		= BIT(1),
	VIRTCHNL2_CAP_SEG_IPV4_SCTP		= BIT(2),
	VIRTCHNL2_CAP_SEG_IPV6_TCP		= BIT(3),
	VIRTCHNL2_CAP_SEG_IPV6_UDP		= BIT(4),
	VIRTCHNL2_CAP_SEG_IPV6_SCTP		= BIT(5),
	VIRTCHNL2_CAP_SEG_GENERIC		= BIT(6),
	VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL	= BIT(7),
	VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL	= BIT(8),
};
155 
/* Receive Side Scaling Flow type capability flags.
 * Bit values for the rss_caps field of struct virtchnl2_get_capabilities.
 */
enum virtchnl2_cap_rss {
	VIRTCHNL2_CAP_RSS_IPV4_TCP		= BIT(0),
	VIRTCHNL2_CAP_RSS_IPV4_UDP		= BIT(1),
	VIRTCHNL2_CAP_RSS_IPV4_SCTP		= BIT(2),
	VIRTCHNL2_CAP_RSS_IPV4_OTHER		= BIT(3),
	VIRTCHNL2_CAP_RSS_IPV6_TCP		= BIT(4),
	VIRTCHNL2_CAP_RSS_IPV6_UDP		= BIT(5),
	VIRTCHNL2_CAP_RSS_IPV6_SCTP		= BIT(6),
	VIRTCHNL2_CAP_RSS_IPV6_OTHER		= BIT(7),
	VIRTCHNL2_CAP_RSS_IPV4_AH		= BIT(8),
	VIRTCHNL2_CAP_RSS_IPV4_ESP		= BIT(9),
	VIRTCHNL2_CAP_RSS_IPV4_AH_ESP		= BIT(10),
	VIRTCHNL2_CAP_RSS_IPV6_AH		= BIT(11),
	VIRTCHNL2_CAP_RSS_IPV6_ESP		= BIT(12),
	VIRTCHNL2_CAP_RSS_IPV6_AH_ESP		= BIT(13),
};
173 
/* Header split capability flags.
 * Bit values for the hsplit_caps field of struct virtchnl2_get_capabilities
 * and for the rx_split_pos field of struct virtchnl2_create_vport.
 */
enum virtchnl2_cap_rx_hsplit_at {
	/* Split at end of L2 header, i.e. for prepended metadata */
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L2		= BIT(0),
	/* Split at start of L3 header; all VLANs go into the header buffer */
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L3		= BIT(1),
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4		= BIT(2),
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6		= BIT(3),
};
183 
/* Receive Side Coalescing offload capability flags.
 * Bit values for the rsc_caps field of struct virtchnl2_get_capabilities.
 */
enum virtchnl2_cap_rsc {
	VIRTCHNL2_CAP_RSC_IPV4_TCP		= BIT(0),
	VIRTCHNL2_CAP_RSC_IPV4_SCTP		= BIT(1),
	VIRTCHNL2_CAP_RSC_IPV6_TCP		= BIT(2),
	VIRTCHNL2_CAP_RSC_IPV6_SCTP		= BIT(3),
};
191 
/* Other capability flags.
 * 64-bit (BIT_ULL) values for the other_caps field of
 * struct virtchnl2_get_capabilities.
 */
enum virtchnl2_cap_other {
	VIRTCHNL2_CAP_RDMA			= BIT_ULL(0),
	VIRTCHNL2_CAP_SRIOV			= BIT_ULL(1),
	VIRTCHNL2_CAP_MACFILTER			= BIT_ULL(2),
	VIRTCHNL2_CAP_FLOW_DIRECTOR		= BIT_ULL(3),
	/* Queue based scheduling using split queue model */
	VIRTCHNL2_CAP_SPLITQ_QSCHED		= BIT_ULL(4),
	VIRTCHNL2_CAP_CRC			= BIT_ULL(5),
	VIRTCHNL2_CAP_ADQ			= BIT_ULL(6),
	VIRTCHNL2_CAP_WB_ON_ITR			= BIT_ULL(7),
	VIRTCHNL2_CAP_PROMISC			= BIT_ULL(8),
	VIRTCHNL2_CAP_LINK_SPEED		= BIT_ULL(9),
	VIRTCHNL2_CAP_INLINE_IPSEC		= BIT_ULL(10),
	VIRTCHNL2_CAP_LARGE_NUM_QUEUES		= BIT_ULL(11),
	VIRTCHNL2_CAP_VLAN			= BIT_ULL(12),
	VIRTCHNL2_CAP_PTP			= BIT_ULL(13),
	/* EDT: Earliest Departure Time capability used for Timing Wheel */
	VIRTCHNL2_CAP_EDT			= BIT_ULL(14),
	VIRTCHNL2_CAP_ADV_RSS			= BIT_ULL(15),
	VIRTCHNL2_CAP_FDIR			= BIT_ULL(16),
	VIRTCHNL2_CAP_RX_FLEX_DESC		= BIT_ULL(17),
	VIRTCHNL2_CAP_PTYPE			= BIT_ULL(18),
	VIRTCHNL2_CAP_LOOPBACK			= BIT_ULL(19),
	/* Other capability 20-21 is reserved */
	VIRTCHNL2_CAP_LAN_MEMORY_REGIONS	= BIT_ULL(22),

	/* this must be the last capability */
	VIRTCHNL2_CAP_OEM			= BIT_ULL(63),
};
222 
/* Underlying device type, reported in the device_type field of
 * struct virtchnl2_get_capabilities.
 * NOTE: the tag spelling "virtchl2" (missing 'n') is historical and is
 * referenced as-is elsewhere in this file; do not rename.
 */
enum virtchl2_device_type {
	VIRTCHNL2_MEV_DEVICE			= 0,
};
227 
/**
 * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes.
 * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode i.e. inorder
 *				    completions where descriptors and buffers
 *				    are completed at the same time.
 * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out of order
 *				   packet processing where descriptors are
 *				   cleaned in order, but buffers can be
 *				   completed out of order.
 *
 * Used in the sched_mode field of struct virtchnl2_txq_info.
 */
enum virtchnl2_txq_sched_mode {
	VIRTCHNL2_TXQ_SCHED_MODE_QUEUE		= 0,
	VIRTCHNL2_TXQ_SCHED_MODE_FLOW		= 1,
};
242 
/**
 * enum virtchnl2_rxq_flags - Receive Queue Feature flags.
 * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag.
 * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag.
 * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed
 *					by hardware immediately after processing
 *					each packet.
 * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size.
 * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size.
 *
 * Bit values for the qflags field of struct virtchnl2_rxq_info.
 */
enum virtchnl2_rxq_flags {
	VIRTCHNL2_RXQ_RSC			= BIT(0),
	VIRTCHNL2_RXQ_HDR_SPLIT			= BIT(1),
	VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK	= BIT(2),
	VIRTCHNL2_RX_DESC_SIZE_16BYTE		= BIT(3),
	VIRTCHNL2_RX_DESC_SIZE_32BYTE		= BIT(4),
};
260 
/* Type of RSS algorithm.
 * Used in the rss_algorithm field of struct virtchnl2_create_vport.
 */
enum virtchnl2_rss_alg {
	VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
	VIRTCHNL2_RSS_ALG_R_ASYMMETRIC		= 1,
	VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
	VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC		= 3,
};
268 
/* Type of event, delivered via VIRTCHNL2_OP_EVENT messages. */
enum virtchnl2_event_codes {
	VIRTCHNL2_EVENT_UNKNOWN			= 0,
	VIRTCHNL2_EVENT_LINK_CHANGE		= 1,
	/* Event type 2, 3 are reserved */
};
275 
/* Transmit and Receive queue types are valid in legacy as well as split queue
 * models. With Split Queue model, 2 additional types are introduced -
 * TX_COMPLETION and RX_BUFFER. In the split queue model, the RX queue type
 * corresponds to the queue where hardware posts completions.
 */
enum virtchnl2_queue_type {
	VIRTCHNL2_QUEUE_TYPE_TX			= 0,
	VIRTCHNL2_QUEUE_TYPE_RX			= 1,
	VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION	= 2,
	VIRTCHNL2_QUEUE_TYPE_RX_BUFFER		= 3,
	VIRTCHNL2_QUEUE_TYPE_CONFIG_TX		= 4,
	VIRTCHNL2_QUEUE_TYPE_CONFIG_RX		= 5,
	/* Queue types 6, 7, 8, 9 are reserved */
	VIRTCHNL2_QUEUE_TYPE_MBX_TX		= 10,
	VIRTCHNL2_QUEUE_TYPE_MBX_RX		= 11,
};
292 
/* Interrupt throttling rate (ITR) register index */
enum virtchnl2_itr_idx {
	VIRTCHNL2_ITR_IDX_0			= 0,
	VIRTCHNL2_ITR_IDX_1			= 1,
};
298 
299 /**
300  * enum virtchnl2_mac_addr_type - MAC address types.
301  * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the
302  *				primary/device unicast MAC address filter for
303  *				VIRTCHNL2_OP_ADD_MAC_ADDR and
304  *				VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the
305  *				underlying control plane function to accurately
306  *				track the MAC address and for VM/function reset.
307  *
308  * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra
309  *			      unicast and/or multicast filters that are being
310  *			      added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or
311  *			      VIRTCHNL2_OP_DEL_MAC_ADDR.
312  */
313 enum virtchnl2_mac_addr_type {
314 	VIRTCHNL2_MAC_ADDR_PRIMARY		= 1,
315 	VIRTCHNL2_MAC_ADDR_EXTRA		= 2,
316 };
317 
/* Flags used for promiscuous mode (VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE) */
enum virtchnl2_promisc_flags {
	VIRTCHNL2_UNICAST_PROMISC		= BIT(0),
	VIRTCHNL2_MULTICAST_PROMISC		= BIT(1),
};
323 
/* Protocol header type within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group of protocol headers. Each
 * logical group of protocol headers encapsulates or is encapsulated using/by
 * tunneling or encapsulation protocols for network virtualization.
 *
 * Protocol ids are fixed ABI values; ids annotated "mandatory" below are
 * required protocol ids.
 */
enum virtchnl2_proto_hdr_type {
	/* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_ANY			= 0,
	VIRTCHNL2_PROTO_HDR_PRE_MAC		= 1,
	/* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_MAC			= 2,
	VIRTCHNL2_PROTO_HDR_POST_MAC		= 3,
	VIRTCHNL2_PROTO_HDR_ETHERTYPE		= 4,
	VIRTCHNL2_PROTO_HDR_VLAN		= 5,
	VIRTCHNL2_PROTO_HDR_SVLAN		= 6,
	VIRTCHNL2_PROTO_HDR_CVLAN		= 7,
	VIRTCHNL2_PROTO_HDR_MPLS		= 8,
	VIRTCHNL2_PROTO_HDR_UMPLS		= 9,
	VIRTCHNL2_PROTO_HDR_MMPLS		= 10,
	VIRTCHNL2_PROTO_HDR_PTP			= 11,
	VIRTCHNL2_PROTO_HDR_CTRL		= 12,
	VIRTCHNL2_PROTO_HDR_LLDP		= 13,
	VIRTCHNL2_PROTO_HDR_ARP			= 14,
	VIRTCHNL2_PROTO_HDR_ECP			= 15,
	VIRTCHNL2_PROTO_HDR_EAPOL		= 16,
	VIRTCHNL2_PROTO_HDR_PPPOD		= 17,
	VIRTCHNL2_PROTO_HDR_PPPOE		= 18,
	/* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV4		= 19,
	/* IPv4 and IPv6 Fragment header types are only associated to
	 * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
	 * cannot be used independently.
	 */
	/* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV4_FRAG		= 20,
	/* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV6		= 21,
	/* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV6_FRAG		= 22,
	VIRTCHNL2_PROTO_HDR_IPV6_EH		= 23,
	/* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_UDP			= 24,
	/* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_TCP			= 25,
	/* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_SCTP		= 26,
	/* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_ICMP		= 27,
	/* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_ICMPV6		= 28,
	VIRTCHNL2_PROTO_HDR_IGMP		= 29,
	VIRTCHNL2_PROTO_HDR_AH			= 30,
	VIRTCHNL2_PROTO_HDR_ESP			= 31,
	VIRTCHNL2_PROTO_HDR_IKE			= 32,
	VIRTCHNL2_PROTO_HDR_NATT_KEEP		= 33,
	/* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_PAY			= 34,
	VIRTCHNL2_PROTO_HDR_L2TPV2		= 35,
	VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL	= 36,
	VIRTCHNL2_PROTO_HDR_L2TPV3		= 37,
	VIRTCHNL2_PROTO_HDR_GTP			= 38,
	VIRTCHNL2_PROTO_HDR_GTP_EH		= 39,
	VIRTCHNL2_PROTO_HDR_GTPCV2		= 40,
	VIRTCHNL2_PROTO_HDR_GTPC_TEID		= 41,
	VIRTCHNL2_PROTO_HDR_GTPU		= 42,
	VIRTCHNL2_PROTO_HDR_GTPU_UL		= 43,
	VIRTCHNL2_PROTO_HDR_GTPU_DL		= 44,
	VIRTCHNL2_PROTO_HDR_ECPRI		= 45,
	VIRTCHNL2_PROTO_HDR_VRRP		= 46,
	VIRTCHNL2_PROTO_HDR_OSPF		= 47,
	/* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_TUN			= 48,
	VIRTCHNL2_PROTO_HDR_GRE			= 49,
	VIRTCHNL2_PROTO_HDR_NVGRE		= 50,
	VIRTCHNL2_PROTO_HDR_VXLAN		= 51,
	VIRTCHNL2_PROTO_HDR_VXLAN_GPE		= 52,
	VIRTCHNL2_PROTO_HDR_GENEVE		= 53,
	VIRTCHNL2_PROTO_HDR_NSH			= 54,
	VIRTCHNL2_PROTO_HDR_QUIC		= 55,
	VIRTCHNL2_PROTO_HDR_PFCP		= 56,
	VIRTCHNL2_PROTO_HDR_PFCP_NODE		= 57,
	VIRTCHNL2_PROTO_HDR_PFCP_SESSION	= 58,
	VIRTCHNL2_PROTO_HDR_RTP			= 59,
	VIRTCHNL2_PROTO_HDR_ROCE		= 60,
	VIRTCHNL2_PROTO_HDR_ROCEV1		= 61,
	VIRTCHNL2_PROTO_HDR_ROCEV2		= 62,
	/* Protocol ids up to 32767 are reserved.
	 * 32768 - 65534 are used for user defined protocol ids.
	 * VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id.
	 */
	VIRTCHNL2_PROTO_HDR_NO_PROTO		= 65535,
};
416 
/* Version constants exchanged in struct virtchnl2_version_info during
 * VIRTCHNL2_OP_VERSION negotiation (major 2, minor 0).
 * NOTE: the tag spelling "virtchl2" (missing 'n') is historical; do not rename.
 */
enum virtchl2_version {
	VIRTCHNL2_VERSION_MINOR_0		= 0,
	VIRTCHNL2_VERSION_MAJOR_2		= 2,
};
421 
/**
 * struct virtchnl2_edt_caps - Get EDT granularity and time horizon.
 * @tstamp_granularity_ns: Timestamp granularity in nanoseconds.
 * @time_horizon_ns: Total time window in nanoseconds.
 *
 * All fields are little-endian, per the file-wide wire-format convention.
 *
 * Associated with VIRTCHNL2_OP_GET_EDT_CAPS.
 */
struct virtchnl2_edt_caps {
	__le64 tstamp_granularity_ns;
	__le64 time_horizon_ns;
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_edt_caps);
434 
/**
 * struct virtchnl2_version_info - Version information.
 * @major: Major version (see enum virtchl2_version).
 * @minor: Minor version (see enum virtchl2_version).
 *
 * PF/VF posts its version number to the CP. CP responds with its version number
 * in the same format, along with a return code.
 * If there is a major version mismatch, then the PF/VF cannot operate.
 * If there is a minor version mismatch, then the PF/VF can operate but should
 * add a warning to the system log.
 *
 * This version opcode MUST always be specified as == 1, regardless of other
 * changes in the API. The CP must always respond to this message without
 * error regardless of version mismatch.
 *
 * Associated with VIRTCHNL2_OP_VERSION.
 */
struct virtchnl2_version_info {
	__le32 major;
	__le32 minor;
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
457 
/**
 * struct virtchnl2_get_capabilities - Capabilities info.
 * @csum_caps: See enum virtchnl2_cap_txrx_csum.
 * @seg_caps: See enum virtchnl2_cap_seg.
 * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
 * @rsc_caps: See enum virtchnl2_cap_rsc.
 * @rss_caps: See enum virtchnl2_cap_rss.
 * @other_caps: See enum virtchnl2_cap_other.
 * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
 *		     provided by CP.
 * @mailbox_vector_id: Mailbox vector id.
 * @num_allocated_vectors: Maximum number of allocated vectors for the device.
 * @max_rx_q: Maximum number of supported Rx queues.
 * @max_tx_q: Maximum number of supported Tx queues.
 * @max_rx_bufq: Maximum number of supported buffer queues.
 * @max_tx_complq: Maximum number of supported completion queues.
 * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP
 *		   responds with the maximum VFs granted.
 * @max_vports: Maximum number of vports that can be supported.
 * @default_num_vports: Default number of vports driver should allocate on load.
 * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes.
 * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be
 *			    sent per transmit packet without needing to be
 *			    linearized.
 * @pad: Padding.
 * @reserved: Reserved.
 * @device_type: See enum virtchl2_device_type.
 * @min_sso_packet_len: Min packet length supported by device for single
 *			segment offload.
 * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
 *			 an LSO.
 * @num_rdma_allocated_vectors: Maximum number of allocated RDMA vectors for
 *				the device.
 * @pad1: Padding for future extensions.
 *
 * Dataplane driver sends this message to CP to negotiate capabilities and
 * provides a virtchnl2_get_capabilities structure with its desired
 * capabilities, max_sriov_vfs and num_allocated_vectors.
 * CP responds with a virtchnl2_get_capabilities structure updated
 * with allowed capabilities and the other fields as below.
 * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs
 * that can be created by this PF. For any other value 'n', CP responds
 * with max_sriov_vfs set to min(n, x) where x is the max number of VFs
 * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.
 * If dataplane driver sets num_allocated_vectors as 0, CP will respond with 1
 * which is default vector associated with the default mailbox. For any other
 * value 'n', CP responds with a value <= n based on the CP's policy of
 * max number of vectors for a PF.
 * CP will respond with the vector ID of mailbox allocated to the PF in
 * mailbox_vector_id and the number of itr index registers in itr_idx_map
 * (NOTE(review): this structure has no itr_idx_map member — confirm where
 * that value is actually carried).
 * It also responds with default number of vports that the dataplane driver
 * should come up with in default_num_vports and maximum number of vports that
 * can be supported in max_vports.
 *
 * Associated with VIRTCHNL2_OP_GET_CAPS.
 */
struct virtchnl2_get_capabilities {
	__le32 csum_caps;
	__le32 seg_caps;
	__le32 hsplit_caps;
	__le32 rsc_caps;
	__le64 rss_caps;
	__le64 other_caps;
	__le32 mailbox_dyn_ctl;
	__le16 mailbox_vector_id;
	__le16 num_allocated_vectors;
	__le16 max_rx_q;
	__le16 max_tx_q;
	__le16 max_rx_bufq;
	__le16 max_tx_complq;
	__le16 max_sriov_vfs;
	__le16 max_vports;
	__le16 default_num_vports;
	__le16 max_tx_hdr_size;
	u8 max_sg_bufs_per_tx_pkt;
	u8 pad[3];
	u8 reserved[4];
	__le32 device_type;
	u8 min_sso_packet_len;
	u8 max_hdr_buf_per_lso;
	__le16 num_rdma_allocated_vectors;
	u8 pad1[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
542 
/**
 * struct virtchnl2_queue_reg_chunk - Single queue chunk.
 * @type: See enum virtchnl2_queue_type.
 * @start_queue_id: Start Queue ID.
 * @num_queues: Number of queues in the chunk.
 * @pad: Padding.
 * @qtail_reg_start: Queue tail register offset.
 * @qtail_reg_spacing: Queue tail register spacing.
 * @pad1: Padding for future extensions.
 *
 * Describes one run of contiguous queue IDs of a single queue type.
 */
struct virtchnl2_queue_reg_chunk {
	__le32 type;
	__le32 start_queue_id;
	__le32 num_queues;
	__le32 pad;
	__le64 qtail_reg_start;
	__le32 qtail_reg_spacing;
	u8 pad1[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
563 
/**
 * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous
 *				       queues.
 * @num_chunks: Number of chunks.
 * @pad: Padding.
 * @chunks: Chunks of queue info; flexible array whose length is given by
 *	    @num_chunks (little-endian, see __counted_by_le).
 */
struct virtchnl2_queue_reg_chunks {
	__le16 num_chunks;
	u8 pad[6];
	struct virtchnl2_queue_reg_chunk chunks[] __counted_by_le(num_chunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
577 
/**
 * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
 * @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports
 * @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport
 *
 * Bit values for the vport_flags field of struct virtchnl2_create_vport.
 */
enum virtchnl2_vport_flags {
	VIRTCHNL2_VPORT_UPLINK_PORT	= BIT(0),
	/* VIRTCHNL2_VPORT_* bits [1:3] rsvd */
	VIRTCHNL2_VPORT_ENABLE_RDMA             = BIT(4),
};
588 
/**
 * struct virtchnl2_create_vport - Create vport config info.
 * @vport_type: See enum virtchnl2_vport_type.
 * @txq_model: See enum virtchnl2_queue_model.
 * @rxq_model: See enum virtchnl2_queue_model.
 * @num_tx_q: Number of Tx queues.
 * @num_tx_complq: Valid only if txq_model is split queue.
 * @num_rx_q: Number of Rx queues.
 * @num_rx_bufq: Valid only if rxq_model is split queue.
 * @default_rx_q: Relative receive queue index to be used as default.
 * @vport_index: Used to align PF and CP in case of default multiple vports,
 *		 it is filled by the PF and CP returns the same value, to
 *		 enable the driver to support multiple asynchronous parallel
 *		 CREATE_VPORT requests and associate a response to a specific
 *		 request.
 * @max_mtu: Max MTU. CP populates this field on response.
 * @vport_id: Vport id. CP populates this field on response.
 * @default_mac_addr: Default MAC address.
 * @vport_flags: See enum virtchnl2_vport_flags.
 * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
 * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
 * @pad1: Padding.
 * @rss_algorithm: RSS algorithm, see enum virtchnl2_rss_alg.
 * @rss_key_size: RSS key size.
 * @rss_lut_size: RSS LUT size.
 * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at.
 * @pad2: Padding.
 * @chunks: Chunks of contiguous queues.
 *
 * PF sends this message to CP to create a vport by filling in required
 * fields of virtchnl2_create_vport structure.
 * CP responds with the updated virtchnl2_create_vport structure containing the
 * necessary fields followed by chunks which in turn will have an array of
 * num_chunks entries of virtchnl2_queue_chunk structures.
 *
 * Associated with VIRTCHNL2_OP_CREATE_VPORT.
 */
struct virtchnl2_create_vport {
	__le16 vport_type;
	__le16 txq_model;
	__le16 rxq_model;
	__le16 num_tx_q;
	__le16 num_tx_complq;
	__le16 num_rx_q;
	__le16 num_rx_bufq;
	__le16 default_rx_q;
	__le16 vport_index;
	/* CP populates the following fields on response */
	__le16 max_mtu;
	__le32 vport_id;
	u8 default_mac_addr[ETH_ALEN];
	__le16 vport_flags;
	__le64 rx_desc_ids;
	__le64 tx_desc_ids;
	u8 pad1[72];
	__le32 rss_algorithm;
	__le16 rss_key_size;
	__le16 rss_lut_size;
	__le32 rx_split_pos;
	u8 pad2[20];
	struct virtchnl2_queue_reg_chunks chunks;
};
VIRTCHNL2_CHECK_STRUCT_LEN(160, virtchnl2_create_vport);
652 
/**
 * struct virtchnl2_vport - Vport ID info.
 * @vport_id: Vport id.
 * @pad: Padding for future extensions.
 *
 * PF sends this message to CP to destroy, enable or disable a vport by filling
 * in the vport_id in virtchnl2_vport structure.
 * CP responds with the status of the requested operation.
 *
 * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT,
 * VIRTCHNL2_OP_DISABLE_VPORT.
 */
struct virtchnl2_vport {
	__le32 vport_id;
	u8 pad[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);
670 
/**
 * struct virtchnl2_txq_info - Transmit queue config info
 * @dma_ring_addr: DMA address of the transmit ring.
 * @type: See enum virtchnl2_queue_type.
 * @queue_id: Queue ID.
 * @relative_queue_id: Valid only if queue model is split and type is transmit
 *		       queue. Used in many to one mapping of transmit queues to
 *		       completion queue.
 * @model: See enum virtchnl2_queue_model.
 * @sched_mode: See enum virtchnl2_txq_sched_mode.
 * @qflags: TX queue feature flags.
 * @ring_len: Ring length.
 * @tx_compl_queue_id: Valid only if queue model is split and type is transmit
 *		       queue.
 * @peer_type: Valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MAILBOX_TX
 * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver
 *		      messages for the respective CONFIG_TX queue.
 * @pad: Padding.
 * @egress_pasid: Egress PASID info.
 * @egress_hdr_pasid: Egress header PASID.
 * @egress_buf_pasid: Egress buffer PASID.
 * @pad1: Padding for future extensions.
 */
struct virtchnl2_txq_info {
	__le64 dma_ring_addr;
	__le32 type;
	__le32 queue_id;
	__le16 relative_queue_id;
	__le16 model;
	__le16 sched_mode;
	__le16 qflags;
	__le16 ring_len;
	__le16 tx_compl_queue_id;
	__le16 peer_type;
	__le16 peer_rx_queue_id;
	u8 pad[4];
	__le32 egress_pasid;
	__le32 egress_hdr_pasid;
	__le32 egress_buf_pasid;
	u8 pad1[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);
713 
/**
 * struct virtchnl2_config_tx_queues - TX queue config.
 * @vport_id: Vport id.
 * @num_qinfo: Number of virtchnl2_txq_info structs.
 * @pad: Padding.
 * @qinfo: Tx queues config info; flexible array of @num_qinfo entries.
 *
 * PF sends this message to set up parameters for one or more transmit queues.
 * This message contains an array of num_qinfo instances of virtchnl2_txq_info
 * structures. CP configures requested queues and returns a status code. If
 * num_qinfo specified is greater than the number of queues associated with the
 * vport, an error is returned and no queues are configured.
 *
 * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
 */
struct virtchnl2_config_tx_queues {
	__le32 vport_id;
	__le16 num_qinfo;
	u8 pad[10];
	struct virtchnl2_txq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
736 
/**
 * struct virtchnl2_rxq_info - Receive queue config info.
 * @desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
 * @dma_ring_addr: DMA address of the receive ring.
 * @type: See enum virtchnl2_queue_type.
 * @queue_id: Queue id.
 * @model: See enum virtchnl2_queue_model.
 * @hdr_buffer_size: Header buffer size.
 * @data_buffer_size: Data buffer size.
 * @max_pkt_size: Max packet size.
 * @ring_len: Ring length.
 * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors.
 *			 This field must be a power of 2.
 * @pad: Padding.
 * @dma_head_wb_addr: Applicable only for receive buffer queues.
 * @qflags: Applicable only for receive completion queues.
 *	    See enum virtchnl2_rxq_flags.
 * @rx_buffer_low_watermark: Rx buffer low watermark.
 * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with
 *		 the Rx queue. Valid only in split queue model.
 * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with
 *		 the Rx queue. Valid only in split queue model.
 * @bufq2_ena: It indicates if there is a second buffer, rx_bufq2_id is valid
 *	       only if this field is set.
 * @pad1: Padding.
 * @ingress_pasid: Ingress PASID.
 * @ingress_hdr_pasid: Ingress header PASID.
 * @ingress_buf_pasid: Ingress buffer PASID.
 * @pad2: Padding for future extensions.
 */
struct virtchnl2_rxq_info {
	__le64 desc_ids;
	__le64 dma_ring_addr;
	__le32 type;
	__le32 queue_id;
	__le16 model;
	__le16 hdr_buffer_size;
	__le32 data_buffer_size;
	__le32 max_pkt_size;
	__le16 ring_len;
	u8 buffer_notif_stride;
	u8 pad;
	__le64 dma_head_wb_addr;
	__le16 qflags;
	__le16 rx_buffer_low_watermark;
	__le16 rx_bufq1_id;
	__le16 rx_bufq2_id;
	u8 bufq2_ena;
	u8 pad1[3];
	__le32 ingress_pasid;
	__le32 ingress_hdr_pasid;
	__le32 ingress_buf_pasid;
	u8 pad2[16];
};
VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
792 
/**
 * struct virtchnl2_config_rx_queues - Rx queues config.
 * @vport_id: Vport id.
 * @num_qinfo: Number of instances.
 * @pad: Padding.
 * @qinfo: Rx queues config info; flexible array of @num_qinfo entries.
 *
 * PF sends this message to set up parameters for one or more receive queues.
 * This message contains an array of num_qinfo instances of virtchnl2_rxq_info
 * structures. CP configures requested queues and returns a status code.
 * If the number of queues specified is greater than the number of queues
 * associated with the vport, an error is returned and no queues are configured.
 *
 * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES.
 */
struct virtchnl2_config_rx_queues {
	__le32 vport_id;
	__le16 num_qinfo;
	u8 pad[18];
	struct virtchnl2_rxq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
815 
816 /**
817  * struct virtchnl2_add_queues - data for VIRTCHNL2_OP_ADD_QUEUES.
818  * @vport_id: Vport id.
819  * @num_tx_q: Number of Tx qieues.
820  * @num_tx_complq: Number of Tx completion queues.
821  * @num_rx_q:  Number of Rx queues.
822  * @num_rx_bufq:  Number of Rx buffer queues.
823  * @pad: Padding.
824  * @chunks: Chunks of contiguous queues.
825  *
826  * PF sends this message to request additional transmit/receive queues beyond
827  * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
828  * structure is used to specify the number of each type of queues.
829  * CP responds with the same structure with the actual number of queues assigned
830  * followed by num_chunks of virtchnl2_queue_chunk structures.
831  *
832  * Associated with VIRTCHNL2_OP_ADD_QUEUES.
833  */
834 struct virtchnl2_add_queues {
835 	__le32 vport_id;
836 	__le16 num_tx_q;
837 	__le16 num_tx_complq;
838 	__le16 num_rx_q;
839 	__le16 num_rx_bufq;
840 	u8 pad[4];
841 	struct virtchnl2_queue_reg_chunks chunks;
842 };
843 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_add_queues);
844 
845 /**
846  * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous
847  *				   interrupt vectors.
848  * @start_vector_id: Start vector id.
849  * @start_evv_id: Start EVV id.
850  * @num_vectors: Number of vectors.
851  * @pad: Padding.
852  * @dynctl_reg_start: DYN_CTL register offset.
853  * @dynctl_reg_spacing: register spacing between DYN_CTL registers of 2
854  *			consecutive vectors.
855  * @itrn_reg_start: ITRN register offset.
856  * @itrn_reg_spacing: Register spacing between dynctl registers of 2
857  *		      consecutive vectors.
858  * @itrn_index_spacing: Register spacing between itrn registers of the same
859  *			vector where n=0..2.
860  * @pad1: Padding for future extensions.
861  *
862  * Register offsets and spacing provided by CP.
863  * Dynamic control registers are used for enabling/disabling/re-enabling
864  * interrupts and updating interrupt rates in the hotpath. Any changes
865  * to interrupt rates in the dynamic control registers will be reflected
866  * in the interrupt throttling rate registers.
867  * itrn registers are used to update interrupt rates for specific
868  * interrupt indices without modifying the state of the interrupt.
869  */
870 struct virtchnl2_vector_chunk {
871 	__le16 start_vector_id;
872 	__le16 start_evv_id;
873 	__le16 num_vectors;
874 	__le16 pad;
875 	__le32 dynctl_reg_start;
876 	__le32 dynctl_reg_spacing;
877 	__le32 itrn_reg_start;
878 	__le32 itrn_reg_spacing;
879 	__le32 itrn_index_spacing;
880 	u8 pad1[4];
881 };
882 VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
883 
884 /**
885  * struct virtchnl2_vector_chunks - chunks of contiguous interrupt vectors.
886  * @num_vchunks: number of vector chunks.
887  * @pad: Padding.
888  * @vchunks: Chunks of contiguous vector info.
889  *
890  * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving
891  * away. CP performs requested action and returns status.
892  *
893  * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS.
894  */
895 struct virtchnl2_vector_chunks {
896 	__le16 num_vchunks;
897 	u8 pad[14];
898 	struct virtchnl2_vector_chunk vchunks[] __counted_by_le(num_vchunks);
899 };
900 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);
901 
902 /**
903  * struct virtchnl2_alloc_vectors - vector allocation info.
904  * @num_vectors: Number of vectors.
905  * @pad: Padding.
906  * @vchunks: Chunks of contiguous vector info.
907  *
908  * PF sends this message to request additional interrupt vectors beyond the
909  * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors
910  * structure is used to specify the number of vectors requested. CP responds
911  * with the same structure with the actual number of vectors assigned followed
912  * by virtchnl2_vector_chunks structure identifying the vector ids.
913  *
914  * Associated with VIRTCHNL2_OP_ALLOC_VECTORS.
915  */
916 struct virtchnl2_alloc_vectors {
917 	__le16 num_vectors;
918 	u8 pad[14];
919 	struct virtchnl2_vector_chunks vchunks;
920 };
921 VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_alloc_vectors);
922 
923 /**
924  * struct virtchnl2_rss_lut - RSS LUT info.
925  * @vport_id: Vport id.
926  * @lut_entries_start: Start of LUT entries.
927  * @lut_entries: Number of LUT entrties.
928  * @pad: Padding.
929  * @lut: RSS lookup table.
930  *
931  * PF sends this message to get or set RSS lookup table. Only supported if
932  * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
933  * negotiation.
934  *
935  * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT.
936  */
937 struct virtchnl2_rss_lut {
938 	__le32 vport_id;
939 	__le16 lut_entries_start;
940 	__le16 lut_entries;
941 	u8 pad[4];
942 	__le32 lut[] __counted_by_le(lut_entries);
943 };
944 VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
945 
946 /**
947  * struct virtchnl2_rss_hash - RSS hash info.
948  * @ptype_groups: Packet type groups bitmap.
949  * @vport_id: Vport id.
950  * @pad: Padding for future extensions.
951  *
952  * PF sends these messages to get and set the hash filter enable bits for RSS.
953  * By default, the CP sets these to all possible traffic types that the
954  * hardware supports. The PF can query this value if it wants to change the
955  * traffic types that are hashed by the hardware.
956  * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit
957  * during configuration negotiation.
958  *
959  * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH
960  */
961 struct virtchnl2_rss_hash {
962 	__le64 ptype_groups;
963 	__le32 vport_id;
964 	u8 pad[4];
965 };
966 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);
967 
968 /**
969  * struct virtchnl2_sriov_vfs_info - VFs info.
970  * @num_vfs: Number of VFs.
971  * @pad: Padding for future extensions.
972  *
973  * This message is used to set number of SRIOV VFs to be created. The actual
974  * allocation of resources for the VFs in terms of vport, queues and interrupts
975  * is done by CP. When this call completes, the IDPF driver calls
976  * pci_enable_sriov to let the OS instantiate the SRIOV PCIE devices.
977  * The number of VFs set to 0 will destroy all the VFs of this function.
978  *
979  * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS.
980  */
981 struct virtchnl2_sriov_vfs_info {
982 	__le16 num_vfs;
983 	__le16 pad;
984 };
985 VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
986 
987 /**
988  * struct virtchnl2_ptype - Packet type info.
989  * @ptype_id_10: 10-bit packet type.
990  * @ptype_id_8: 8-bit packet type.
991  * @proto_id_count: Number of protocol ids the packet supports, maximum of 32
992  *		    protocol ids are supported.
993  * @pad: Padding.
994  * @proto_id: proto_id_count decides the allocation of protocol id array.
995  *	      See enum virtchnl2_proto_hdr_type.
996  *
997  * Based on the descriptor type the PF supports, CP fills ptype_id_10 or
998  * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value
999  * is set to 0xFFFF, PF should consider this ptype as dummy one and it is the
1000  * last ptype.
1001  */
1002 struct virtchnl2_ptype {
1003 	__le16 ptype_id_10;
1004 	u8 ptype_id_8;
1005 	u8 proto_id_count;
1006 	__le16 pad;
1007 	__le16 proto_id[] __counted_by(proto_id_count);
1008 } __packed __aligned(2);
1009 VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
1010 
1011 /**
1012  * struct virtchnl2_get_ptype_info - Packet type info.
1013  * @start_ptype_id: Starting ptype ID.
1014  * @num_ptypes: Number of packet types from start_ptype_id.
1015  * @pad: Padding for future extensions.
1016  *
1017  * The total number of supported packet types is based on the descriptor type.
1018  * For the flex descriptor, it is 1024 (10-bit ptype), and for the base
1019  * descriptor, it is 256 (8-bit ptype). Send this message to the CP by
1020  * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the
1021  * 'start_ptype_id', 'num_ptypes', and the array of ptype (virtchnl2_ptype) that
1022  * are added at the end of the 'virtchnl2_get_ptype_info' message (Note: There
1023  * is no specific field for the ptypes but are added at the end of the
1024  * ptype info message. PF/VF is expected to extract the ptypes accordingly.
1025  * Reason for doing this is because compiler doesn't allow nested flexible
1026  * array fields).
1027  *
1028  * If all the ptypes don't fit into one mailbox buffer, CP splits the
1029  * ptype info into multiple messages, where each message will have its own
1030  * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done
1031  * updating all the ptype information extracted from the package (the number of
1032  * ptypes extracted might be less than what PF/VF expects), it will append a
1033  * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF)
1034  * to the ptype array.
1035  *
1036  * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages.
1037  *
1038  * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO.
1039  */
1040 struct virtchnl2_get_ptype_info {
1041 	__le16 start_ptype_id;
1042 	__le16 num_ptypes;
1043 	__le32 pad;
1044 };
1045 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_ptype_info);
1046 
1047 /**
1048  * struct virtchnl2_vport_stats - Vport statistics.
1049  * @vport_id: Vport id.
1050  * @pad: Padding.
1051  * @rx_bytes: Received bytes.
1052  * @rx_unicast: Received unicast packets.
1053  * @rx_multicast: Received multicast packets.
1054  * @rx_broadcast: Received broadcast packets.
1055  * @rx_discards: Discarded packets on receive.
1056  * @rx_errors: Receive errors.
1057  * @rx_unknown_protocol: Unlnown protocol.
1058  * @tx_bytes: Transmitted bytes.
1059  * @tx_unicast: Transmitted unicast packets.
1060  * @tx_multicast: Transmitted multicast packets.
1061  * @tx_broadcast: Transmitted broadcast packets.
1062  * @tx_discards: Discarded packets on transmit.
1063  * @tx_errors: Transmit errors.
1064  * @rx_invalid_frame_length: Packets with invalid frame length.
1065  * @rx_overflow_drop: Packets dropped on buffer overflow.
1066  *
1067  * PF/VF sends this message to CP to get the update stats by specifying the
1068  * vport_id. CP responds with stats in struct virtchnl2_vport_stats.
1069  *
1070  * Associated with VIRTCHNL2_OP_GET_STATS.
1071  */
1072 struct virtchnl2_vport_stats {
1073 	__le32 vport_id;
1074 	u8 pad[4];
1075 	__le64 rx_bytes;
1076 	__le64 rx_unicast;
1077 	__le64 rx_multicast;
1078 	__le64 rx_broadcast;
1079 	__le64 rx_discards;
1080 	__le64 rx_errors;
1081 	__le64 rx_unknown_protocol;
1082 	__le64 tx_bytes;
1083 	__le64 tx_unicast;
1084 	__le64 tx_multicast;
1085 	__le64 tx_broadcast;
1086 	__le64 tx_discards;
1087 	__le64 tx_errors;
1088 	__le64 rx_invalid_frame_length;
1089 	__le64 rx_overflow_drop;
1090 };
1091 VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
1092 
1093 /**
1094  * struct virtchnl2_event - Event info.
1095  * @event: Event opcode. See enum virtchnl2_event_codes.
1096  * @link_speed: Link_speed provided in Mbps.
1097  * @vport_id: Vport ID.
1098  * @link_status: Link status.
1099  * @pad: Padding.
1100  * @reserved: Reserved.
1101  *
1102  * CP sends this message to inform the PF/VF driver of events that may affect
1103  * it. No direct response is expected from the driver, though it may generate
1104  * other messages in response to this one.
1105  *
1106  * Associated with VIRTCHNL2_OP_EVENT.
1107  */
1108 struct virtchnl2_event {
1109 	__le32 event;
1110 	__le32 link_speed;
1111 	__le32 vport_id;
1112 	u8 link_status;
1113 	u8 pad;
1114 	__le16 reserved;
1115 };
1116 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
1117 
1118 /**
1119  * struct virtchnl2_rss_key - RSS key info.
1120  * @vport_id: Vport id.
1121  * @key_len: Length of RSS key.
1122  * @pad: Padding.
1123  * @key_flex: RSS hash key, packed bytes.
1124  * PF/VF sends this message to get or set RSS key. Only supported if both
1125  * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
1126  * negotiation.
1127  *
1128  * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
1129  */
1130 struct virtchnl2_rss_key {
1131 	__le32 vport_id;
1132 	__le16 key_len;
1133 	u8 pad;
1134 	u8 key_flex[] __counted_by_le(key_len);
1135 } __packed;
1136 VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
1137 
1138 /**
1139  * struct virtchnl2_queue_chunk - chunk of contiguous queues
1140  * @type: See enum virtchnl2_queue_type.
1141  * @start_queue_id: Starting queue id.
1142  * @num_queues: Number of queues.
1143  * @pad: Padding for future extensions.
1144  */
1145 struct virtchnl2_queue_chunk {
1146 	__le32 type;
1147 	__le32 start_queue_id;
1148 	__le32 num_queues;
1149 	u8 pad[4];
1150 };
1151 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
1152 
1153 /* struct virtchnl2_queue_chunks - chunks of contiguous queues
1154  * @num_chunks: Number of chunks.
1155  * @pad: Padding.
1156  * @chunks: Chunks of contiguous queues info.
1157  */
1158 struct virtchnl2_queue_chunks {
1159 	__le16 num_chunks;
1160 	u8 pad[6];
1161 	struct virtchnl2_queue_chunk chunks[] __counted_by_le(num_chunks);
1162 };
1163 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
1164 
1165 /**
1166  * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info.
1167  * @vport_id: Vport id.
1168  * @pad: Padding.
1169  * @chunks: Chunks of contiguous queues info.
1170  *
1171  * PF sends these messages to enable, disable or delete queues specified in
1172  * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues
1173  * to be enabled/disabled/deleted. Also applicable to single queue receive or
1174  * transmit. CP performs requested action and returns status.
1175  *
1176  * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and
1177  * VIRTCHNL2_OP_DISABLE_QUEUES.
1178  */
1179 struct virtchnl2_del_ena_dis_queues {
1180 	__le32 vport_id;
1181 	u8 pad[4];
1182 	struct virtchnl2_queue_chunks chunks;
1183 };
1184 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_del_ena_dis_queues);
1185 
1186 /**
1187  * struct virtchnl2_queue_vector - Queue to vector mapping.
1188  * @queue_id: Queue id.
1189  * @vector_id: Vector id.
1190  * @pad: Padding.
1191  * @itr_idx: See enum virtchnl2_itr_idx.
1192  * @queue_type: See enum virtchnl2_queue_type.
1193  * @pad1: Padding for future extensions.
1194  */
1195 struct virtchnl2_queue_vector {
1196 	__le32 queue_id;
1197 	__le16 vector_id;
1198 	u8 pad[2];
1199 	__le32 itr_idx;
1200 	__le32 queue_type;
1201 	u8 pad1[8];
1202 };
1203 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);
1204 
1205 /**
1206  * struct virtchnl2_queue_vector_maps - Map/unmap queues info.
1207  * @vport_id: Vport id.
1208  * @num_qv_maps: Number of queue vector maps.
1209  * @pad: Padding.
1210  * @qv_maps: Queue to vector maps.
1211  *
1212  * PF sends this message to map or unmap queues to vectors and interrupt
1213  * throttling rate index registers. External data buffer contains
1214  * virtchnl2_queue_vector_maps structure that contains num_qv_maps of
1215  * virtchnl2_queue_vector structures. CP maps the requested queue vector maps
1216  * after validating the queue and vector ids and returns a status code.
1217  *
1218  * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and
1219  * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR.
1220  */
1221 struct virtchnl2_queue_vector_maps {
1222 	__le32 vport_id;
1223 	__le16 num_qv_maps;
1224 	u8 pad[10];
1225 	struct virtchnl2_queue_vector qv_maps[] __counted_by_le(num_qv_maps);
1226 };
1227 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
1228 
1229 /**
1230  * struct virtchnl2_loopback - Loopback info.
1231  * @vport_id: Vport id.
1232  * @enable: Enable/disable.
1233  * @pad: Padding for future extensions.
1234  *
1235  * PF/VF sends this message to transition to/from the loopback state. Setting
1236  * the 'enable' to 1 enables the loopback state and setting 'enable' to 0
1237  * disables it. CP configures the state to loopback and returns status.
1238  *
1239  * Associated with VIRTCHNL2_OP_LOOPBACK.
1240  */
1241 struct virtchnl2_loopback {
1242 	__le32 vport_id;
1243 	u8 enable;
1244 	u8 pad[3];
1245 };
1246 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);
1247 
1248 /* struct virtchnl2_mac_addr - MAC address info.
1249  * @addr: MAC address.
1250  * @type: MAC type. See enum virtchnl2_mac_addr_type.
1251  * @pad: Padding for future extensions.
1252  */
1253 struct virtchnl2_mac_addr {
1254 	u8 addr[ETH_ALEN];
1255 	u8 type;
1256 	u8 pad;
1257 };
1258 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);
1259 
1260 /**
1261  * struct virtchnl2_mac_addr_list - List of MAC addresses.
1262  * @vport_id: Vport id.
1263  * @num_mac_addr: Number of MAC addresses.
1264  * @pad: Padding.
1265  * @mac_addr_list: List with MAC address info.
1266  *
1267  * PF/VF driver uses this structure to send list of MAC addresses to be
1268  * added/deleted to the CP where as CP performs the action and returns the
1269  * status.
1270  *
1271  * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR.
1272  */
1273 struct virtchnl2_mac_addr_list {
1274 	__le32 vport_id;
1275 	__le16 num_mac_addr;
1276 	u8 pad[2];
1277 	struct virtchnl2_mac_addr mac_addr_list[] __counted_by_le(num_mac_addr);
1278 };
1279 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
1280 
1281 /**
1282  * struct virtchnl2_promisc_info - Promisc type info.
1283  * @vport_id: Vport id.
1284  * @flags: See enum virtchnl2_promisc_flags.
1285  * @pad: Padding for future extensions.
1286  *
1287  * PF/VF sends vport id and flags to the CP where as CP performs the action
1288  * and returns the status.
1289  *
1290  * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE.
1291  */
1292 struct virtchnl2_promisc_info {
1293 	__le32 vport_id;
1294 	/* See VIRTCHNL2_PROMISC_FLAGS definitions */
1295 	__le16 flags;
1296 	u8 pad[2];
1297 };
1298 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
1299 
1300 /**
1301  * enum virtchnl2_ptp_caps - PTP capabilities
1302  * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of
1303  *					   device clock
1304  * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of
1305  *					      device clock
1306  * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp
1307  * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp
1308  * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of
1309  *					   device clock
1310  * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of
1311  *					      device clock
1312  * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device
1313  *				      clock
1314  * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of
1315  *					 device clock
1316  * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping
1317  * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping
1318  *
1319  * PF/VF negotiates a set of supported PTP capabilities with the Control Plane.
1320  * There are two access methods - mailbox (_MB) and direct.
1321  * PTP capabilities enables Main Timer operations: get/set/adjust Main Timer,
1322  * cross timestamping and the Tx timestamping.
1323  */
1324 enum virtchnl2_ptp_caps {
1325 	VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME		= BIT(0),
1326 	VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB	= BIT(1),
1327 	VIRTCHNL2_CAP_PTP_GET_CROSS_TIME		= BIT(2),
1328 	VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB		= BIT(3),
1329 	VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME		= BIT(4),
1330 	VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB	= BIT(5),
1331 	VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK		= BIT(6),
1332 	VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB		= BIT(7),
1333 	VIRTCHNL2_CAP_PTP_TX_TSTAMPS			= BIT(8),
1334 	VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB			= BIT(9),
1335 };
1336 
1337 /**
1338  * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clocks
1339  *					  registers.
1340  * @dev_clk_ns_l: Device clock low register offset
1341  * @dev_clk_ns_h: Device clock high register offset
1342  * @phy_clk_ns_l: PHY clock low register offset
1343  * @phy_clk_ns_h: PHY clock high register offset
1344  * @cmd_sync_trigger: The command sync trigger register offset
1345  * @pad: Padding for future extensions
1346  */
1347 struct virtchnl2_ptp_clk_reg_offsets {
1348 	__le32 dev_clk_ns_l;
1349 	__le32 dev_clk_ns_h;
1350 	__le32 phy_clk_ns_l;
1351 	__le32 phy_clk_ns_h;
1352 	__le32 cmd_sync_trigger;
1353 	u8 pad[4];
1354 };
1355 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets);
1356 
1357 /**
1358  * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross
1359  *						 time registers.
1360  * @sys_time_ns_l: System time low register offset
1361  * @sys_time_ns_h: System time high register offset
1362  * @cmd_sync_trigger: The command sync trigger register offset
1363  * @pad: Padding for future extensions
1364  */
1365 struct virtchnl2_ptp_cross_time_reg_offsets {
1366 	__le32 sys_time_ns_l;
1367 	__le32 sys_time_ns_h;
1368 	__le32 cmd_sync_trigger;
1369 	u8 pad[4];
1370 };
1371 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets);
1372 
1373 /**
1374  * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clocks
1375  *					      adjustments registers.
1376  * @dev_clk_cmd_type: Device clock command type register offset
1377  * @dev_clk_incval_l: Device clock increment value low register offset
1378  * @dev_clk_incval_h: Device clock increment value high registers offset
1379  * @dev_clk_shadj_l: Device clock shadow adjust low register offset
1380  * @dev_clk_shadj_h: Device clock shadow adjust high register offset
1381  * @phy_clk_cmd_type: PHY timer command type register offset
1382  * @phy_clk_incval_l: PHY timer increment value low register offset
1383  * @phy_clk_incval_h: PHY timer increment value high register offset
1384  * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
1385  * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
1386  */
1387 struct virtchnl2_ptp_clk_adj_reg_offsets {
1388 	__le32 dev_clk_cmd_type;
1389 	__le32 dev_clk_incval_l;
1390 	__le32 dev_clk_incval_h;
1391 	__le32 dev_clk_shadj_l;
1392 	__le32 dev_clk_shadj_h;
1393 	__le32 phy_clk_cmd_type;
1394 	__le32 phy_clk_incval_l;
1395 	__le32 phy_clk_incval_h;
1396 	__le32 phy_clk_shadj_l;
1397 	__le32 phy_clk_shadj_h;
1398 };
1399 VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
1400 
1401 /**
1402  * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
1403  *					       capabilities.
1404  * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
1405  * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
1406  * @index: Latch index provided to the Tx descriptor
1407  * @pad: Padding for future extensions
1408  */
1409 struct virtchnl2_ptp_tx_tstamp_latch_caps {
1410 	__le32 tx_latch_reg_offset_l;
1411 	__le32 tx_latch_reg_offset_h;
1412 	u8 index;
1413 	u8 pad[7];
1414 };
1415 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
1416 
1417 /**
1418  * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
1419  *						   tstamp entries.
1420  * @vport_id: Vport number
1421  * @num_latches: Total number of latches
1422  * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
1423  * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
1424  * @pad: Padding for future tstamp granularity extensions
1425  * @tstamp_latches: Capabilities of Tx timestamp entries
1426  *
1427  * PF/VF sends this message to negotiate the Tx timestamp latches for each
1428  * Vport.
1429  *
1430  * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
1431  */
1432 struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
1433 	__le32 vport_id;
1434 	__le16 num_latches;
1435 	u8 tstamp_ns_lo_bit;
1436 	u8 tstamp_ns_hi_bit;
1437 	u8 pad[8];
1438 
1439 	struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
1440 						  __counted_by_le(num_latches);
1441 };
1442 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
1443 
1444 /**
1445  * struct virtchnl2_ptp_get_caps - Get PTP capabilities
1446  * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
1447  * @max_adj: The maximum possible frequency adjustment
1448  * @base_incval: The default timer increment value
1449  * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
1450  * @peer_id: Peer ID for PTP Device Control daemon
1451  * @secondary_mbx: Indicates to the driver that it should create a secondary
1452  *		   mailbox to inetract with control plane for PTP
1453  * @pad: Padding for future extensions
1454  * @clk_offsets: Main timer and PHY registers offsets
1455  * @cross_time_offsets: Cross time registers offsets
1456  * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
1457  *
1458  * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
1459  * with supported features and fulfills appropriate structures.
1460  * If HW uses primary MBX for PTP: secondary_mbx is set to false.
1461  * If HW uses secondary MBX for PTP: secondary_mbx is set to true.
1462  *	Control plane has 2 MBX and the driver has 1 MBX, send to peer
1463  *	driver may be used to send a message using valid ptp_peer_mb_q_id and
1464  *	ptp_peer_id.
1465  * If HW does not use send to peer driver: secondary_mbx is no care field and
1466  * peer_mbx_q_id holds invalid value (0xFFFF).
1467  *
1468  * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
1469  */
1470 struct virtchnl2_ptp_get_caps {
1471 	__le32 caps;
1472 	__le32 max_adj;
1473 	__le64 base_incval;
1474 	__le16 peer_mbx_q_id;
1475 	u8 peer_id;
1476 	u8 secondary_mbx;
1477 	u8 pad[4];
1478 
1479 	struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
1480 	struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
1481 	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
1482 };
1483 VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
1484 
1485 /**
1486  * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes tx tstamp
1487  *					  values, index and validity.
1488  * @tstamp: Timestamp value
1489  * @index: Timestamp index from which the value is read
1490  * @valid: Timestamp validity
1491  * @pad: Padding for future extensions
1492  */
1493 struct virtchnl2_ptp_tx_tstamp_latch {
1494 	__le64 tstamp;
1495 	u8 index;
1496 	u8 valid;
1497 	u8 pad[6];
1498 };
1499 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
1500 
1501 /**
1502  * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
1503  *						      associated with the vport.
1504  * @vport_id: Number of vport that requests the timestamp
1505  * @num_latches: Number of latches
1506  * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
1507  * @pad: Padding for future extensions
1508  * @device_time: device time if get_devtime_with_txtstmp was set in request
1509  * @tstamp_latches: PTP TX timestamp latch
1510  *
1511  * PF/VF sends this message to receive a specified number of timestamps
1512  * entries.
1513  *
1514  * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
1515  */
1516 struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
1517 	__le32 vport_id;
1518 	__le16 num_latches;
1519 	u8 get_devtime_with_txtstmp;
1520 	u8 pad[1];
1521 	__le64 device_time;
1522 
1523 	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
1524 					     __counted_by_le(num_latches);
1525 };
1526 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
1527 
1528 /**
1529  * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
1530  *					   VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
1531  * @dev_time_ns: Device clock time value in nanoseconds
1532  *
1533  * PF/VF sends this message to receive the time from the main timer.
1534  */
1535 struct virtchnl2_ptp_get_dev_clk_time {
1536 	__le64 dev_time_ns;
1537 };
1538 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
1539 
1540 /**
1541  * struct virtchnl2_ptp_get_cross_time: Associated with message
1542  *					VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
1543  * @sys_time_ns: System counter value expressed in nanoseconds, read
1544  *		 synchronously with device time
1545  * @dev_time_ns: Device clock time value expressed in nanoseconds
1546  *
1547  * PF/VF sends this message to receive the cross time.
1548  */
1549 struct virtchnl2_ptp_get_cross_time {
1550 	__le64 sys_time_ns;
1551 	__le64 dev_time_ns;
1552 };
1553 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
1554 
1555 /**
1556  * struct virtchnl2_ptp_set_dev_clk_time: Associated with message
1557  *					  VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
1558  * @dev_time_ns: Device time value expressed in nanoseconds to set
1559  *
1560  * PF/VF sends this message to set the time of the main timer.
1561  */
1562 struct virtchnl2_ptp_set_dev_clk_time {
1563 	__le64 dev_time_ns;
1564 };
1565 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
1566 
1567 /**
1568  * struct virtchnl2_ptp_adj_dev_clk_fine: Associated with message
1569  *					  VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
1570  * @incval: Source timer increment value per clock cycle
1571  *
1572  * PF/VF sends this message to adjust the frequency of the main timer by the
1573  * indicated increment value.
1574  */
1575 struct virtchnl2_ptp_adj_dev_clk_fine {
1576 	__le64 incval;
1577 };
1578 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine);
1579 
1580 /**
1581  * struct virtchnl2_ptp_adj_dev_clk_time: Associated with message
1582  *					  VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME.
1583  * @delta: Offset in nanoseconds to adjust the time by
1584  *
1585  * PF/VF sends this message to adjust the time of the main timer by the delta.
1586  */
1587 struct virtchnl2_ptp_adj_dev_clk_time {
1588 	__le64 delta;
1589 };
1590 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
1591 
1592 /**
1593  * struct virtchnl2_mem_region - MMIO memory region
1594  * @start_offset: starting offset of the MMIO memory region
1595  * @size: size of the MMIO memory region
1596  */
1597 struct virtchnl2_mem_region {
1598 	__le64 start_offset;
1599 	__le64 size;
1600 };
1601 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region);
1602 
1603 /**
1604  * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions
1605  * @num_memory_regions: number of memory regions
1606  * @pad: Padding
1607  * @mem_reg: List with memory region info
1608  *
1609  * PF/VF sends this message to learn what LAN MMIO memory regions it should map.
1610  */
1611 struct virtchnl2_get_lan_memory_regions {
1612 	__le16 num_memory_regions;
1613 	u8 pad[6];
1614 	struct virtchnl2_mem_region mem_reg[];
1615 };
1616 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions);
1617 
#endif /* _VIRTCHNL2_H_ */
1619