1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #ifndef _VIRTCHNL2_H_
5 #define _VIRTCHNL2_H_
6 
7 #include <linux/if_ether.h>
8 
9 /* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
10  * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
11  * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
12  *
13  * PF/VF uses the virtchnl2 interface defined in this header file to communicate
14  * with device Control Plane (CP). Driver and the CP may run on different
15  * platforms with different endianness. To avoid byte order discrepancies,
16  * all the structures in this header follow little-endian format.
17  *
18  * This is an interface definition file where existing enums and their values
19  * must remain unchanged over time, so we specify explicit values for all enums.
20  */
21 
22 /* This macro is used to generate compilation errors if a structure
23  * is not exactly the correct length.
24  */
25 #define VIRTCHNL2_CHECK_STRUCT_LEN(n, X)	\
26 	static_assert((n) == sizeof(struct X))
27 
28 /* A new major set of opcodes is introduced here, leaving room for the old
29  * misc opcodes to be added in the future. Also, these opcodes may only be
30  * used if both the PF and VF have successfully negotiated VIRTCHNL version
31  * 2.0 during the VIRTCHNL2_OP_VERSION exchange.
32  */
33 enum virtchnl2_op {
34 	VIRTCHNL2_OP_UNKNOWN			= 0,
35 	VIRTCHNL2_OP_VERSION			= 1,
36 	VIRTCHNL2_OP_GET_CAPS			= 500,
37 	VIRTCHNL2_OP_CREATE_VPORT		= 501,
38 	VIRTCHNL2_OP_DESTROY_VPORT		= 502,
39 	VIRTCHNL2_OP_ENABLE_VPORT		= 503,
40 	VIRTCHNL2_OP_DISABLE_VPORT		= 504,
41 	VIRTCHNL2_OP_CONFIG_TX_QUEUES		= 505,
42 	VIRTCHNL2_OP_CONFIG_RX_QUEUES		= 506,
43 	VIRTCHNL2_OP_ENABLE_QUEUES		= 507,
44 	VIRTCHNL2_OP_DISABLE_QUEUES		= 508,
45 	VIRTCHNL2_OP_ADD_QUEUES			= 509,
46 	VIRTCHNL2_OP_DEL_QUEUES			= 510,
47 	VIRTCHNL2_OP_MAP_QUEUE_VECTOR		= 511,
48 	VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR		= 512,
49 	VIRTCHNL2_OP_GET_RSS_KEY		= 513,
50 	VIRTCHNL2_OP_SET_RSS_KEY		= 514,
51 	VIRTCHNL2_OP_GET_RSS_LUT		= 515,
52 	VIRTCHNL2_OP_SET_RSS_LUT		= 516,
53 	VIRTCHNL2_OP_GET_RSS_HASH		= 517,
54 	VIRTCHNL2_OP_SET_RSS_HASH		= 518,
55 	VIRTCHNL2_OP_SET_SRIOV_VFS		= 519,
56 	VIRTCHNL2_OP_ALLOC_VECTORS		= 520,
57 	VIRTCHNL2_OP_DEALLOC_VECTORS		= 521,
58 	VIRTCHNL2_OP_EVENT			= 522,
59 	VIRTCHNL2_OP_GET_STATS			= 523,
60 	VIRTCHNL2_OP_RESET_VF			= 524,
61 	VIRTCHNL2_OP_GET_EDT_CAPS		= 525,
62 	VIRTCHNL2_OP_GET_PTYPE_INFO		= 526,
63 	/* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
64 	 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
65 	 * Opcodes 529, 530, 531, 532 and 533 are reserved.
66 	 */
67 	VIRTCHNL2_OP_LOOPBACK			= 534,
68 	VIRTCHNL2_OP_ADD_MAC_ADDR		= 535,
69 	VIRTCHNL2_OP_DEL_MAC_ADDR		= 536,
70 	VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE	= 537,
71 
72 	/* TimeSync opcodes */
73 	VIRTCHNL2_OP_PTP_GET_CAPS			= 541,
74 	VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP		= 542,
75 	VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME		= 543,
76 	VIRTCHNL2_OP_PTP_GET_CROSS_TIME			= 544,
77 	VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME		= 545,
78 	VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE		= 546,
79 	VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME		= 547,
80 	VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS	= 548,
81 };
82 
83 /**
84  * enum virtchnl2_vport_type - Type of virtual port.
85  * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type.
86  */
87 enum virtchnl2_vport_type {
88 	VIRTCHNL2_VPORT_TYPE_DEFAULT		= 0,
89 };
90 
91 /**
92  * enum virtchnl2_queue_model - Type of queue model.
93  * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model.
94  * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model.
95  *
96  * In the single queue model, the same transmit descriptor queue is used by
97  * software to post descriptors to hardware and by hardware to post completed
98  * descriptors to software.
99  * Likewise, the same receive descriptor queue is used by hardware to post
100  * completions to software and by software to post buffers to hardware.
101  *
102  * In the split queue model, hardware uses transmit completion queues to post
103  * descriptor/buffer completions to software, while software uses transmit
104  * descriptor queues to post descriptors to hardware.
105  * Likewise, hardware posts descriptor completions to the receive descriptor
106  * queue, while software uses receive buffer queues to post buffers to hardware.
107  */
108 enum virtchnl2_queue_model {
109 	VIRTCHNL2_QUEUE_MODEL_SINGLE		= 0,
110 	VIRTCHNL2_QUEUE_MODEL_SPLIT		= 1,
111 };
112 
113 /* Checksum offload capability flags */
114 enum virtchnl2_cap_txrx_csum {
115 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4		= BIT(0),
116 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	= BIT(1),
117 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	= BIT(2),
118 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	= BIT(3),
119 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	= BIT(4),
120 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	= BIT(5),
121 	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	= BIT(6),
122 	VIRTCHNL2_CAP_TX_CSUM_GENERIC		= BIT(7),
123 	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		= BIT(8),
124 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	= BIT(9),
125 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	= BIT(10),
126 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	= BIT(11),
127 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	= BIT(12),
128 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	= BIT(13),
129 	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	= BIT(14),
130 	VIRTCHNL2_CAP_RX_CSUM_GENERIC		= BIT(15),
131 	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	= BIT(16),
132 	VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL	= BIT(17),
133 	VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL	= BIT(18),
134 	VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL	= BIT(19),
135 	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL	= BIT(20),
136 	VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL	= BIT(21),
137 	VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL	= BIT(22),
138 	VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL	= BIT(23),
139 };
140 
141 /* Segmentation offload capability flags */
142 enum virtchnl2_cap_seg {
143 	VIRTCHNL2_CAP_SEG_IPV4_TCP		= BIT(0),
144 	VIRTCHNL2_CAP_SEG_IPV4_UDP		= BIT(1),
145 	VIRTCHNL2_CAP_SEG_IPV4_SCTP		= BIT(2),
146 	VIRTCHNL2_CAP_SEG_IPV6_TCP		= BIT(3),
147 	VIRTCHNL2_CAP_SEG_IPV6_UDP		= BIT(4),
148 	VIRTCHNL2_CAP_SEG_IPV6_SCTP		= BIT(5),
149 	VIRTCHNL2_CAP_SEG_GENERIC		= BIT(6),
150 	VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL	= BIT(7),
151 	VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL	= BIT(8),
152 };
153 
154 /* Receive Side Scaling Flow type capability flags */
155 enum virtchnl2_cap_rss {
156 	VIRTCHNL2_CAP_RSS_IPV4_TCP		= BIT(0),
157 	VIRTCHNL2_CAP_RSS_IPV4_UDP		= BIT(1),
158 	VIRTCHNL2_CAP_RSS_IPV4_SCTP		= BIT(2),
159 	VIRTCHNL2_CAP_RSS_IPV4_OTHER		= BIT(3),
160 	VIRTCHNL2_CAP_RSS_IPV6_TCP		= BIT(4),
161 	VIRTCHNL2_CAP_RSS_IPV6_UDP		= BIT(5),
162 	VIRTCHNL2_CAP_RSS_IPV6_SCTP		= BIT(6),
163 	VIRTCHNL2_CAP_RSS_IPV6_OTHER		= BIT(7),
164 	VIRTCHNL2_CAP_RSS_IPV4_AH		= BIT(8),
165 	VIRTCHNL2_CAP_RSS_IPV4_ESP		= BIT(9),
166 	VIRTCHNL2_CAP_RSS_IPV4_AH_ESP		= BIT(10),
167 	VIRTCHNL2_CAP_RSS_IPV6_AH		= BIT(11),
168 	VIRTCHNL2_CAP_RSS_IPV6_ESP		= BIT(12),
169 	VIRTCHNL2_CAP_RSS_IPV6_AH_ESP		= BIT(13),
170 };
171 
172 /* Header split capability flags */
173 enum virtchnl2_cap_rx_hsplit_at {
174 	/* For prepended metadata */
175 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L2		= BIT(0),
176 	/* all VLANs go into header buffer */
177 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L3		= BIT(1),
178 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4		= BIT(2),
179 	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6		= BIT(3),
180 };
181 
182 /* Receive Side Coalescing offload capability flags */
183 enum virtchnl2_cap_rsc {
184 	VIRTCHNL2_CAP_RSC_IPV4_TCP		= BIT(0),
185 	VIRTCHNL2_CAP_RSC_IPV4_SCTP		= BIT(1),
186 	VIRTCHNL2_CAP_RSC_IPV6_TCP		= BIT(2),
187 	VIRTCHNL2_CAP_RSC_IPV6_SCTP		= BIT(3),
188 };
189 
190 /* Other capability flags */
191 enum virtchnl2_cap_other {
192 	VIRTCHNL2_CAP_RDMA			= BIT_ULL(0),
193 	VIRTCHNL2_CAP_SRIOV			= BIT_ULL(1),
194 	VIRTCHNL2_CAP_MACFILTER			= BIT_ULL(2),
195 	VIRTCHNL2_CAP_FLOW_DIRECTOR		= BIT_ULL(3),
196 	/* Queue based scheduling using split queue model */
197 	VIRTCHNL2_CAP_SPLITQ_QSCHED		= BIT_ULL(4),
198 	VIRTCHNL2_CAP_CRC			= BIT_ULL(5),
199 	VIRTCHNL2_CAP_ADQ			= BIT_ULL(6),
200 	VIRTCHNL2_CAP_WB_ON_ITR			= BIT_ULL(7),
201 	VIRTCHNL2_CAP_PROMISC			= BIT_ULL(8),
202 	VIRTCHNL2_CAP_LINK_SPEED		= BIT_ULL(9),
203 	VIRTCHNL2_CAP_INLINE_IPSEC		= BIT_ULL(10),
204 	VIRTCHNL2_CAP_LARGE_NUM_QUEUES		= BIT_ULL(11),
205 	VIRTCHNL2_CAP_VLAN			= BIT_ULL(12),
206 	VIRTCHNL2_CAP_PTP			= BIT_ULL(13),
207 	/* EDT: Earliest Departure Time capability used for Timing Wheel */
208 	VIRTCHNL2_CAP_EDT			= BIT_ULL(14),
209 	VIRTCHNL2_CAP_ADV_RSS			= BIT_ULL(15),
210 	VIRTCHNL2_CAP_FDIR			= BIT_ULL(16),
211 	VIRTCHNL2_CAP_RX_FLEX_DESC		= BIT_ULL(17),
212 	VIRTCHNL2_CAP_PTYPE			= BIT_ULL(18),
213 	VIRTCHNL2_CAP_LOOPBACK			= BIT_ULL(19),
214 	/* Other capability 20 is reserved */
215 
216 	/* this must be the last capability */
217 	VIRTCHNL2_CAP_OEM			= BIT_ULL(63),
218 };
219 
220 /* Underlying device type */
221 enum virtchl2_device_type {
222 	VIRTCHNL2_MEV_DEVICE			= 0,
223 };
224 
225 /**
226  * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes.
227  * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode, i.e. in-order
228  *				    completions where descriptors and buffers
229  *				    are completed at the same time.
230  * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out of order
231  *				   packet processing where descriptors are
232  *				   cleaned in order, but buffers can be
233  *				   completed out of order.
234  */
235 enum virtchnl2_txq_sched_mode {
236 	VIRTCHNL2_TXQ_SCHED_MODE_QUEUE		= 0,
237 	VIRTCHNL2_TXQ_SCHED_MODE_FLOW		= 1,
238 };
239 
240 /**
241  * enum virtchnl2_rxq_flags - Receive Queue Feature flags.
242  * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag.
243  * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag.
244  * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed
245  *					by hardware immediately after processing
246  *					each packet.
247  * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size.
248  * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size.
249  */
250 enum virtchnl2_rxq_flags {
251 	VIRTCHNL2_RXQ_RSC			= BIT(0),
252 	VIRTCHNL2_RXQ_HDR_SPLIT			= BIT(1),
253 	VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK	= BIT(2),
254 	VIRTCHNL2_RX_DESC_SIZE_16BYTE		= BIT(3),
255 	VIRTCHNL2_RX_DESC_SIZE_32BYTE		= BIT(4),
256 };
257 
258 /* Type of RSS algorithm */
259 enum virtchnl2_rss_alg {
260 	VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
261 	VIRTCHNL2_RSS_ALG_R_ASYMMETRIC		= 1,
262 	VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
263 	VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC		= 3,
264 };
265 
266 /* Type of event */
267 enum virtchnl2_event_codes {
268 	VIRTCHNL2_EVENT_UNKNOWN			= 0,
269 	VIRTCHNL2_EVENT_LINK_CHANGE		= 1,
270 	/* Event type 2, 3 are reserved */
271 };
272 
273 /* Transmit and receive queue types are valid in legacy as well as split queue
274  * models. With the split queue model, 2 additional types are introduced:
275  * TX_COMPLETION and RX_BUFFER. In the split queue model, the receive queue
276  * type corresponds to the queue where hardware posts completions.
277  */
278 enum virtchnl2_queue_type {
279 	VIRTCHNL2_QUEUE_TYPE_TX			= 0,
280 	VIRTCHNL2_QUEUE_TYPE_RX			= 1,
281 	VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION	= 2,
282 	VIRTCHNL2_QUEUE_TYPE_RX_BUFFER		= 3,
283 	VIRTCHNL2_QUEUE_TYPE_CONFIG_TX		= 4,
284 	VIRTCHNL2_QUEUE_TYPE_CONFIG_RX		= 5,
285 	/* Queue types 6, 7, 8, 9 are reserved */
286 	VIRTCHNL2_QUEUE_TYPE_MBX_TX		= 10,
287 	VIRTCHNL2_QUEUE_TYPE_MBX_RX		= 11,
288 };
289 
290 /* Interrupt throttling rate index */
291 enum virtchnl2_itr_idx {
292 	VIRTCHNL2_ITR_IDX_0			= 0,
293 	VIRTCHNL2_ITR_IDX_1			= 1,
294 };
295 
296 /**
297  * enum virtchnl2_mac_addr_type - MAC address types.
298  * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the
299  *				primary/device unicast MAC address filter for
300  *				VIRTCHNL2_OP_ADD_MAC_ADDR and
301  *				VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the
302  *				underlying control plane function to accurately
303  *				track the MAC address and for VM/function reset.
304  *
305  * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra
306  *			      unicast and/or multicast filters that are being
307  *			      added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or
308  *			      VIRTCHNL2_OP_DEL_MAC_ADDR.
309  */
310 enum virtchnl2_mac_addr_type {
311 	VIRTCHNL2_MAC_ADDR_PRIMARY		= 1,
312 	VIRTCHNL2_MAC_ADDR_EXTRA		= 2,
313 };
314 
315 /* Flags used for promiscuous mode */
316 enum virtchnl2_promisc_flags {
317 	VIRTCHNL2_UNICAST_PROMISC		= BIT(0),
318 	VIRTCHNL2_MULTICAST_PROMISC		= BIT(1),
319 };
320 
321 /* Protocol header type within a packet segment. A segment consists of one or
322  * more protocol headers that make up a logical group. Each logical group of
323  * protocol headers either encapsulates or is encapsulated by tunneling or
324  * encapsulation protocols used for network virtualization.
325  */
326 enum virtchnl2_proto_hdr_type {
327 	/* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
328 	VIRTCHNL2_PROTO_HDR_ANY			= 0,
329 	VIRTCHNL2_PROTO_HDR_PRE_MAC		= 1,
330 	/* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
331 	VIRTCHNL2_PROTO_HDR_MAC			= 2,
332 	VIRTCHNL2_PROTO_HDR_POST_MAC		= 3,
333 	VIRTCHNL2_PROTO_HDR_ETHERTYPE		= 4,
334 	VIRTCHNL2_PROTO_HDR_VLAN		= 5,
335 	VIRTCHNL2_PROTO_HDR_SVLAN		= 6,
336 	VIRTCHNL2_PROTO_HDR_CVLAN		= 7,
337 	VIRTCHNL2_PROTO_HDR_MPLS		= 8,
338 	VIRTCHNL2_PROTO_HDR_UMPLS		= 9,
339 	VIRTCHNL2_PROTO_HDR_MMPLS		= 10,
340 	VIRTCHNL2_PROTO_HDR_PTP			= 11,
341 	VIRTCHNL2_PROTO_HDR_CTRL		= 12,
342 	VIRTCHNL2_PROTO_HDR_LLDP		= 13,
343 	VIRTCHNL2_PROTO_HDR_ARP			= 14,
344 	VIRTCHNL2_PROTO_HDR_ECP			= 15,
345 	VIRTCHNL2_PROTO_HDR_EAPOL		= 16,
346 	VIRTCHNL2_PROTO_HDR_PPPOD		= 17,
347 	VIRTCHNL2_PROTO_HDR_PPPOE		= 18,
348 	/* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
349 	VIRTCHNL2_PROTO_HDR_IPV4		= 19,
350 	/* IPv4 and IPv6 fragment header types are only associated with
351 	 * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
352 	 * and cannot be used independently.
353 	 */
354 	/* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
355 	VIRTCHNL2_PROTO_HDR_IPV4_FRAG		= 20,
356 	/* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
357 	VIRTCHNL2_PROTO_HDR_IPV6		= 21,
358 	/* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
359 	VIRTCHNL2_PROTO_HDR_IPV6_FRAG		= 22,
360 	VIRTCHNL2_PROTO_HDR_IPV6_EH		= 23,
361 	/* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
362 	VIRTCHNL2_PROTO_HDR_UDP			= 24,
363 	/* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
364 	VIRTCHNL2_PROTO_HDR_TCP			= 25,
365 	/* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
366 	VIRTCHNL2_PROTO_HDR_SCTP		= 26,
367 	/* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
368 	VIRTCHNL2_PROTO_HDR_ICMP		= 27,
369 	/* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
370 	VIRTCHNL2_PROTO_HDR_ICMPV6		= 28,
371 	VIRTCHNL2_PROTO_HDR_IGMP		= 29,
372 	VIRTCHNL2_PROTO_HDR_AH			= 30,
373 	VIRTCHNL2_PROTO_HDR_ESP			= 31,
374 	VIRTCHNL2_PROTO_HDR_IKE			= 32,
375 	VIRTCHNL2_PROTO_HDR_NATT_KEEP		= 33,
376 	/* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
377 	VIRTCHNL2_PROTO_HDR_PAY			= 34,
378 	VIRTCHNL2_PROTO_HDR_L2TPV2		= 35,
379 	VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL	= 36,
380 	VIRTCHNL2_PROTO_HDR_L2TPV3		= 37,
381 	VIRTCHNL2_PROTO_HDR_GTP			= 38,
382 	VIRTCHNL2_PROTO_HDR_GTP_EH		= 39,
383 	VIRTCHNL2_PROTO_HDR_GTPCV2		= 40,
384 	VIRTCHNL2_PROTO_HDR_GTPC_TEID		= 41,
385 	VIRTCHNL2_PROTO_HDR_GTPU		= 42,
386 	VIRTCHNL2_PROTO_HDR_GTPU_UL		= 43,
387 	VIRTCHNL2_PROTO_HDR_GTPU_DL		= 44,
388 	VIRTCHNL2_PROTO_HDR_ECPRI		= 45,
389 	VIRTCHNL2_PROTO_HDR_VRRP		= 46,
390 	VIRTCHNL2_PROTO_HDR_OSPF		= 47,
391 	/* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
392 	VIRTCHNL2_PROTO_HDR_TUN			= 48,
393 	VIRTCHNL2_PROTO_HDR_GRE			= 49,
394 	VIRTCHNL2_PROTO_HDR_NVGRE		= 50,
395 	VIRTCHNL2_PROTO_HDR_VXLAN		= 51,
396 	VIRTCHNL2_PROTO_HDR_VXLAN_GPE		= 52,
397 	VIRTCHNL2_PROTO_HDR_GENEVE		= 53,
398 	VIRTCHNL2_PROTO_HDR_NSH			= 54,
399 	VIRTCHNL2_PROTO_HDR_QUIC		= 55,
400 	VIRTCHNL2_PROTO_HDR_PFCP		= 56,
401 	VIRTCHNL2_PROTO_HDR_PFCP_NODE		= 57,
402 	VIRTCHNL2_PROTO_HDR_PFCP_SESSION	= 58,
403 	VIRTCHNL2_PROTO_HDR_RTP			= 59,
404 	VIRTCHNL2_PROTO_HDR_ROCE		= 60,
405 	VIRTCHNL2_PROTO_HDR_ROCEV1		= 61,
406 	VIRTCHNL2_PROTO_HDR_ROCEV2		= 62,
407 	/* Protocol ids up to 32767 are reserved.
408 	 * 32768 - 65534 are used for user defined protocol ids.
409 	 * VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id.
410 	 */
411 	VIRTCHNL2_PROTO_HDR_NO_PROTO		= 65535,
412 };
413 
414 enum virtchl2_version {
415 	VIRTCHNL2_VERSION_MINOR_0		= 0,
416 	VIRTCHNL2_VERSION_MAJOR_2		= 2,
417 };
418 
419 /**
420  * struct virtchnl2_edt_caps - Get EDT granularity and time horizon.
421  * @tstamp_granularity_ns: Timestamp granularity in nanoseconds.
422  * @time_horizon_ns: Total time window in nanoseconds.
423  *
424  * Associated with VIRTCHNL2_OP_GET_EDT_CAPS.
425  */
426 struct virtchnl2_edt_caps {
427 	__le64 tstamp_granularity_ns;
428 	__le64 time_horizon_ns;
429 };
430 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_edt_caps);
431 
432 /**
433  * struct virtchnl2_version_info - Version information.
434  * @major: Major version.
435  * @minor: Minor version.
436  *
437  * PF/VF posts its version number to the CP. CP responds with its version number
438  * in the same format, along with a return code.
439  * If there is a major version mismatch, then the PF/VF cannot operate.
440  * If there is a minor version mismatch, then the PF/VF can operate but should
441  * add a warning to the system log.
442  *
443  * This version opcode MUST always be specified as == 1, regardless of other
444  * changes in the API. The CP must always respond to this message without
445  * error regardless of version mismatch.
446  *
447  * Associated with VIRTCHNL2_OP_VERSION.
448  */
449 struct virtchnl2_version_info {
450 	__le32 major;
451 	__le32 minor;
452 };
453 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
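
/* Example (illustrative sketch, not part of the interface definition): a
 * driver-side version check after the VIRTCHNL2_OP_VERSION exchange. 'reply'
 * is assumed to point at the CP's response buffer.
 *
 *	struct virtchnl2_version_info ver = {
 *		.major = cpu_to_le32(VIRTCHNL2_VERSION_MAJOR_2),
 *		.minor = cpu_to_le32(VIRTCHNL2_VERSION_MINOR_0),
 *	};
 *
 *	// send &ver with VIRTCHNL2_OP_VERSION, then validate the reply:
 *	if (le32_to_cpu(reply->major) != VIRTCHNL2_VERSION_MAJOR_2)
 *		return -EOPNOTSUPP;	// major mismatch: cannot operate
 *	if (le32_to_cpu(reply->minor) != VIRTCHNL2_VERSION_MINOR_0)
 *		pr_warn("virtchnl2 minor version mismatch\n");
 */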
454 
455 /**
456  * struct virtchnl2_get_capabilities - Capabilities info.
457  * @csum_caps: See enum virtchnl2_cap_txrx_csum.
458  * @seg_caps: See enum virtchnl2_cap_seg.
459  * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
460  * @rsc_caps: See enum virtchnl2_cap_rsc.
461  * @rss_caps: See enum virtchnl2_cap_rss.
462  * @other_caps: See enum virtchnl2_cap_other.
463  * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
464  *		     provided by CP.
465  * @mailbox_vector_id: Mailbox vector id.
466  * @num_allocated_vectors: Maximum number of allocated vectors for the device.
467  * @max_rx_q: Maximum number of supported Rx queues.
468  * @max_tx_q: Maximum number of supported Tx queues.
469  * @max_rx_bufq: Maximum number of supported buffer queues.
470  * @max_tx_complq: Maximum number of supported completion queues.
471  * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP
472  *		   responds with the maximum VFs granted.
473  * @max_vports: Maximum number of vports that can be supported.
474  * @default_num_vports: Default number of vports driver should allocate on load.
475  * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes.
476  * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be
477  *			    sent per transmit packet without needing to be
478  *			    linearized.
479  * @pad: Padding.
480  * @reserved: Reserved.
481  * @device_type: See enum virtchl2_device_type.
482  * @min_sso_packet_len: Min packet length supported by device for single
483  *			segment offload.
484  * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
485  *			 an LSO.
486  * @pad1: Padding for future extensions.
487  *
488  * Dataplane driver sends this message to CP to negotiate capabilities and
489  * provides a virtchnl2_get_capabilities structure with its desired
490  * capabilities, max_sriov_vfs and num_allocated_vectors.
491  * CP responds with a virtchnl2_get_capabilities structure updated
492  * with allowed capabilities and the other fields as below.
493  * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs
494  * that can be created by this PF. For any other value 'n', CP responds
495  * with max_sriov_vfs set to min(n, x) where x is the max number of VFs
496  * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.
497  * If the dataplane driver sets num_allocated_vectors as 0, CP will respond
498  * with 1, which is the default vector associated with the default mailbox.
499  * For any other value 'n', CP responds with a value <= n based on the CP's
500  * policy of max number of vectors for a PF.
501  * CP will respond with the vector ID of the mailbox allocated to the PF in
502  * mailbox_vector_id and the number of itr index registers in itr_idx_map.
503  * It also responds with the default number of vports that the dataplane
504  * driver should come up with in default_num_vports and the maximum number of
505  * vports that can be supported in max_vports.
506  *
507  * Associated with VIRTCHNL2_OP_GET_CAPS.
508  */
509 struct virtchnl2_get_capabilities {
510 	__le32 csum_caps;
511 	__le32 seg_caps;
512 	__le32 hsplit_caps;
513 	__le32 rsc_caps;
514 	__le64 rss_caps;
515 	__le64 other_caps;
516 	__le32 mailbox_dyn_ctl;
517 	__le16 mailbox_vector_id;
518 	__le16 num_allocated_vectors;
519 	__le16 max_rx_q;
520 	__le16 max_tx_q;
521 	__le16 max_rx_bufq;
522 	__le16 max_tx_complq;
523 	__le16 max_sriov_vfs;
524 	__le16 max_vports;
525 	__le16 default_num_vports;
526 	__le16 max_tx_hdr_size;
527 	u8 max_sg_bufs_per_tx_pkt;
528 	u8 pad[3];
529 	u8 reserved[4];
530 	__le32 device_type;
531 	u8 min_sso_packet_len;
532 	u8 max_hdr_buf_per_lso;
533 	u8 pad1[10];
534 };
535 VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
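
/* Example (illustrative sketch, not a normative sequence): filling a
 * VIRTCHNL2_OP_GET_CAPS request, letting the CP pick defaults by passing 0
 * for max_sriov_vfs and num_allocated_vectors as described above. The
 * capability selection here is arbitrary.
 *
 *	struct virtchnl2_get_capabilities caps = {
 *		.csum_caps = cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
 *					 VIRTCHNL2_CAP_RX_CSUM_L3_IPV4),
 *		.seg_caps = cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP),
 *		.rss_caps = cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
 *					VIRTCHNL2_CAP_RSS_IPV6_TCP),
 *		.other_caps = cpu_to_le64(VIRTCHNL2_CAP_SPLITQ_QSCHED),
 *		.max_sriov_vfs = 0,		// CP returns its maximum
 *		.num_allocated_vectors = 0,	// CP returns the default (1)
 *	};
 *
 *	// send &caps with VIRTCHNL2_OP_GET_CAPS; the CP echoes the structure
 *	// back with the granted capabilities and resource limits filled in.
 */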
536 
537 /**
538  * struct virtchnl2_queue_reg_chunk - Single queue chunk.
539  * @type: See enum virtchnl2_queue_type.
540  * @start_queue_id: Start Queue ID.
541  * @num_queues: Number of queues in the chunk.
542  * @pad: Padding.
543  * @qtail_reg_start: Queue tail register offset.
544  * @qtail_reg_spacing: Queue tail register spacing.
545  * @pad1: Padding for future extensions.
546  */
547 struct virtchnl2_queue_reg_chunk {
548 	__le32 type;
549 	__le32 start_queue_id;
550 	__le32 num_queues;
551 	__le32 pad;
552 	__le64 qtail_reg_start;
553 	__le32 qtail_reg_spacing;
554 	u8 pad1[4];
555 };
556 VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
557 
558 /**
559  * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous
560  *				       queues.
561  * @num_chunks: Number of chunks.
562  * @pad: Padding.
563  * @chunks: Chunks of queue info.
564  */
565 struct virtchnl2_queue_reg_chunks {
566 	__le16 num_chunks;
567 	u8 pad[6];
568 	struct virtchnl2_queue_reg_chunk chunks[] __counted_by_le(num_chunks);
569 };
570 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
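
/* Example (illustrative sketch): walking the flexible chunks[] array of a
 * received virtchnl2_queue_reg_chunks. 'chunks' and 'msg_len' are assumed to
 * come from the caller; struct_size() accounts for the flexible-array member
 * when checking the message length.
 *
 *	u16 num = le16_to_cpu(chunks->num_chunks);
 *	int i;
 *
 *	if (msg_len < struct_size(chunks, chunks, num))
 *		return -EINVAL;		// truncated message
 *
 *	for (i = 0; i < num; i++) {
 *		const struct virtchnl2_queue_reg_chunk *c = &chunks->chunks[i];
 *
 *		pr_debug("type %u: qid %u..%u, tail reg 0x%llx (spacing %u)\n",
 *			 le32_to_cpu(c->type),
 *			 le32_to_cpu(c->start_queue_id),
 *			 le32_to_cpu(c->start_queue_id) +
 *			 le32_to_cpu(c->num_queues) - 1,
 *			 le64_to_cpu(c->qtail_reg_start),
 *			 le32_to_cpu(c->qtail_reg_spacing));
 *	}
 */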
571 
572 /**
573  * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
574  * @VIRTCHNL2_VPORT_UPLINK_PORT: Representative of underlying physical ports.
575  */
576 enum virtchnl2_vport_flags {
577 	VIRTCHNL2_VPORT_UPLINK_PORT	= BIT(0),
578 };
579 
580 /**
581  * struct virtchnl2_create_vport - Create vport config info.
582  * @vport_type: See enum virtchnl2_vport_type.
583  * @txq_model: See virtchnl2_queue_model.
584  * @rxq_model: See virtchnl2_queue_model.
585  * @num_tx_q: Number of Tx queues.
586  * @num_tx_complq: Valid only if txq_model is split queue.
587  * @num_rx_q: Number of Rx queues.
588  * @num_rx_bufq: Valid only if rxq_model is split queue.
589  * @default_rx_q: Relative receive queue index to be used as default.
590  * @vport_index: Used to align the PF and CP in case of default multiple
591  *		 vports. It is filled by the PF and the CP returns the same
592  *		 value, enabling the driver to support multiple asynchronous
593  *		 parallel CREATE_VPORT requests and to associate a response
594  *		 with a specific request.
595  * @max_mtu: Max MTU. CP populates this field on response.
596  * @vport_id: Vport id. CP populates this field on response.
597  * @default_mac_addr: Default MAC address.
598  * @vport_flags: See enum virtchnl2_vport_flags.
599  * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
600  * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
601  * @pad1: Padding.
602  * @rss_algorithm: RSS algorithm.
603  * @rss_key_size: RSS key size.
604  * @rss_lut_size: RSS LUT size.
605  * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at.
606  * @pad2: Padding.
607  * @chunks: Chunks of contiguous queues.
608  *
609  * PF sends this message to CP to create a vport by filling in required
610  * fields of virtchnl2_create_vport structure.
611  * CP responds with the updated virtchnl2_create_vport structure containing the
612  * necessary fields followed by chunks, which in turn will have an array of
613  * num_chunks entries of virtchnl2_queue_reg_chunk structures.
614  *
615  * Associated with VIRTCHNL2_OP_CREATE_VPORT.
616  */
617 struct virtchnl2_create_vport {
618 	__le16 vport_type;
619 	__le16 txq_model;
620 	__le16 rxq_model;
621 	__le16 num_tx_q;
622 	__le16 num_tx_complq;
623 	__le16 num_rx_q;
624 	__le16 num_rx_bufq;
625 	__le16 default_rx_q;
626 	__le16 vport_index;
627 	/* CP populates the following fields on response */
628 	__le16 max_mtu;
629 	__le32 vport_id;
630 	u8 default_mac_addr[ETH_ALEN];
631 	__le16 vport_flags;
632 	__le64 rx_desc_ids;
633 	__le64 tx_desc_ids;
634 	u8 pad1[72];
635 	__le32 rss_algorithm;
636 	__le16 rss_key_size;
637 	__le16 rss_lut_size;
638 	__le32 rx_split_pos;
639 	u8 pad2[20];
640 	struct virtchnl2_queue_reg_chunks chunks;
641 };
642 VIRTCHNL2_CHECK_STRUCT_LEN(160, virtchnl2_create_vport);
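
/* Example (illustrative sketch): a minimal VIRTCHNL2_OP_CREATE_VPORT request
 * asking for a split-queue vport. The queue counts are arbitrary and the CP
 * may adjust them in its response; vport_id, max_mtu, the RSS sizes and
 * chunks are filled in by the CP.
 *
 *	struct virtchnl2_create_vport vport = {
 *		.vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT),
 *		.txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT),
 *		.rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT),
 *		.num_tx_q = cpu_to_le16(4),
 *		.num_tx_complq = cpu_to_le16(4),	// split Tx only
 *		.num_rx_q = cpu_to_le16(4),
 *		.num_rx_bufq = cpu_to_le16(8),		// split Rx only
 *		.vport_index = cpu_to_le16(0),
 *	};
 *
 *	// send &vport with VIRTCHNL2_OP_CREATE_VPORT and read vport_id,
 *	// max_mtu, rss_key_size, rss_lut_size and chunks from the response.
 */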
643 
644 /**
645  * struct virtchnl2_vport - Vport ID info.
646  * @vport_id: Vport id.
647  * @pad: Padding for future extensions.
648  *
649  * PF sends this message to CP to destroy, enable or disable a vport by filling
650  * in the vport_id in virtchnl2_vport structure.
651  * CP responds with the status of the requested operation.
652  *
653  * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT,
654  * VIRTCHNL2_OP_DISABLE_VPORT.
655  */
656 struct virtchnl2_vport {
657 	__le32 vport_id;
658 	u8 pad[4];
659 };
660 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);
661 
662 /**
663  * struct virtchnl2_txq_info - Transmit queue config info
664  * @dma_ring_addr: DMA address.
665  * @type: See enum virtchnl2_queue_type.
666  * @queue_id: Queue ID.
667  * @relative_queue_id: Valid only if queue model is split and type is transmit
668  *		       queue. Used in many to one mapping of transmit queues to
669  *		       completion queue.
670  * @model: See enum virtchnl2_queue_model.
671  * @sched_mode: See enum virtchnl2_txq_sched_mode.
672  * @qflags: TX queue feature flags.
673  * @ring_len: Ring length.
674  * @tx_compl_queue_id: Valid only if queue model is split and type is transmit
675  *		       queue.
676  * @peer_type: Valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MBX_TX.
677  * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver
678  *		      messages for the respective CONFIG_TX queue.
679  * @pad: Padding.
680  * @egress_pasid: Egress PASID info.
681  * @egress_hdr_pasid: Egress header PASID.
682  * @egress_buf_pasid: Egress buffer PASID.
683  * @pad1: Padding for future extensions.
684  */
685 struct virtchnl2_txq_info {
686 	__le64 dma_ring_addr;
687 	__le32 type;
688 	__le32 queue_id;
689 	__le16 relative_queue_id;
690 	__le16 model;
691 	__le16 sched_mode;
692 	__le16 qflags;
693 	__le16 ring_len;
694 	__le16 tx_compl_queue_id;
695 	__le16 peer_type;
696 	__le16 peer_rx_queue_id;
697 	u8 pad[4];
698 	__le32 egress_pasid;
699 	__le32 egress_hdr_pasid;
700 	__le32 egress_buf_pasid;
701 	u8 pad1[8];
702 };
703 VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);
704 
705 /**
706  * struct virtchnl2_config_tx_queues - TX queue config.
707  * @vport_id: Vport id.
708  * @num_qinfo: Number of virtchnl2_txq_info structs.
709  * @pad: Padding.
710  * @qinfo: Tx queues config info.
711  *
712  * PF sends this message to set up parameters for one or more transmit queues.
713  * This message contains an array of num_qinfo instances of virtchnl2_txq_info
714  * structures. CP configures requested queues and returns a status code. If
715  * num_qinfo specified is greater than the number of queues associated with the
716  * vport, an error is returned and no queues are configured.
717  *
718  * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
719  */
720 struct virtchnl2_config_tx_queues {
721 	__le32 vport_id;
722 	__le16 num_qinfo;
723 	u8 pad[10];
724 	struct virtchnl2_txq_info qinfo[] __counted_by_le(num_qinfo);
725 };
726 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
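
/* Example (illustrative sketch): sizing and filling a
 * VIRTCHNL2_OP_CONFIG_TX_QUEUES message for 'n' split-model queues. qinfo[]
 * is a flexible array, so the buffer is allocated with struct_size().
 * n, first_qid, ring_len, dma_addr[], complq_id and vport_id are
 * caller-provided placeholders.
 *
 *	struct virtchnl2_config_tx_queues *ctq;
 *	size_t len = struct_size(ctq, qinfo, n);
 *	u16 i;
 *
 *	ctq = kzalloc(len, GFP_KERNEL);
 *	if (!ctq)
 *		return -ENOMEM;
 *
 *	ctq->vport_id = cpu_to_le32(vport_id);
 *	ctq->num_qinfo = cpu_to_le16(n);
 *	for (i = 0; i < n; i++) {
 *		ctq->qinfo[i].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
 *		ctq->qinfo[i].queue_id = cpu_to_le32(first_qid + i);
 *		ctq->qinfo[i].model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
 *		ctq->qinfo[i].sched_mode =
 *			cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
 *		ctq->qinfo[i].ring_len = cpu_to_le16(ring_len);
 *		ctq->qinfo[i].dma_ring_addr = cpu_to_le64(dma_addr[i]);
 *		ctq->qinfo[i].tx_compl_queue_id = cpu_to_le16(complq_id);
 *	}
 *	// send ctq (len bytes) with VIRTCHNL2_OP_CONFIG_TX_QUEUES, then kfree(ctq).
 */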
727 
728 /**
729  * struct virtchnl2_rxq_info - Receive queue config info.
730  * @desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
731  * @dma_ring_addr: DMA address.
732  * @type: See enum virtchnl2_queue_type.
733  * @queue_id: Queue id.
734  * @model: See enum virtchnl2_queue_model.
735  * @hdr_buffer_size: Header buffer size.
736  * @data_buffer_size: Data buffer size.
737  * @max_pkt_size: Max packet size.
738  * @ring_len: Ring length.
739  * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors.
740  *			 This field must be a power of 2.
741  * @pad: Padding.
742  * @dma_head_wb_addr: Applicable only for receive buffer queues.
743  * @qflags: Applicable only for receive completion queues.
744  *	    See enum virtchnl2_rxq_flags.
745  * @rx_buffer_low_watermark: Rx buffer low watermark.
746  * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with
747  *		 the Rx queue. Valid only in split queue model.
748  * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with
749  *		 the Rx queue. Valid only in split queue model.
750  * @bufq2_ena: Indicates if there is a second buffer queue; rx_bufq2_id is
751  *	       valid only if this field is set.
752  * @pad1: Padding.
753  * @ingress_pasid: Ingress PASID.
754  * @ingress_hdr_pasid: Ingress header PASID.
755  * @ingress_buf_pasid: Ingress buffer PASID.
756  * @pad2: Padding for future extensions.
757  */
758 struct virtchnl2_rxq_info {
759 	__le64 desc_ids;
760 	__le64 dma_ring_addr;
761 	__le32 type;
762 	__le32 queue_id;
763 	__le16 model;
764 	__le16 hdr_buffer_size;
765 	__le32 data_buffer_size;
766 	__le32 max_pkt_size;
767 	__le16 ring_len;
768 	u8 buffer_notif_stride;
769 	u8 pad;
770 	__le64 dma_head_wb_addr;
771 	__le16 qflags;
772 	__le16 rx_buffer_low_watermark;
773 	__le16 rx_bufq1_id;
774 	__le16 rx_bufq2_id;
775 	u8 bufq2_ena;
776 	u8 pad1[3];
777 	__le32 ingress_pasid;
778 	__le32 ingress_hdr_pasid;
779 	__le32 ingress_buf_pasid;
780 	u8 pad2[16];
781 };
782 VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
783 
784 /**
785  * struct virtchnl2_config_rx_queues - Rx queues config.
786  * @vport_id: Vport id.
787  * @num_qinfo: Number of instances.
788  * @pad: Padding.
789  * @qinfo: Rx queues config info.
790  *
791  * PF sends this message to set up parameters for one or more receive queues.
792  * This message contains an array of num_qinfo instances of virtchnl2_rxq_info
793  * structures. CP configures requested queues and returns a status code.
794  * If the number of queues specified is greater than the number of queues
795  * associated with the vport, an error is returned and no queues are configured.
796  *
797  * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES.
798  */
799 struct virtchnl2_config_rx_queues {
800 	__le32 vport_id;
801 	__le16 num_qinfo;
802 	u8 pad[18];
803 	struct virtchnl2_rxq_info qinfo[] __counted_by_le(num_qinfo);
804 };
805 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
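
/* Example (illustrative sketch): filling one split-model Rx queue entry in an
 * already allocated message 'crq'. The Rx queue is tied to its buffer queues
 * via rx_bufq1_id/rx_bufq2_id; the buffer queues themselves are configured as
 * VIRTCHNL2_QUEUE_TYPE_RX_BUFFER entries. Buffer and packet sizes here are
 * arbitrary; rxq_id, ring_len, rxq_dma, bufq1_id and bufq2_id are assumed.
 *
 *	struct virtchnl2_rxq_info *qi = &crq->qinfo[0];
 *
 *	qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
 *	qi->queue_id = cpu_to_le32(rxq_id);
 *	qi->model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
 *	qi->ring_len = cpu_to_le16(ring_len);
 *	qi->dma_ring_addr = cpu_to_le64(rxq_dma);
 *	qi->data_buffer_size = cpu_to_le32(2048);
 *	qi->max_pkt_size = cpu_to_le32(1522);
 *	qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
 *	qi->rx_bufq1_id = cpu_to_le16(bufq1_id);
 *	qi->rx_bufq2_id = cpu_to_le16(bufq2_id);
 *	qi->bufq2_ena = 1;
 */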
806 
807 /**
808  * struct virtchnl2_add_queues - data for VIRTCHNL2_OP_ADD_QUEUES.
809  * @vport_id: Vport id.
810  * @num_tx_q: Number of Tx queues.
811  * @num_tx_complq: Number of Tx completion queues.
812  * @num_rx_q: Number of Rx queues.
813  * @num_rx_bufq: Number of Rx buffer queues.
814  * @pad: Padding.
815  * @chunks: Chunks of contiguous queues.
816  *
817  * PF sends this message to request additional transmit/receive queues beyond
818  * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
819  * structure is used to specify the number of each type of queues.
820  * CP responds with the same structure with the actual number of queues assigned
821  * followed by num_chunks of virtchnl2_queue_reg_chunk structures.
822  *
823  * Associated with VIRTCHNL2_OP_ADD_QUEUES.
824  */
825 struct virtchnl2_add_queues {
826 	__le32 vport_id;
827 	__le16 num_tx_q;
828 	__le16 num_tx_complq;
829 	__le16 num_rx_q;
830 	__le16 num_rx_bufq;
831 	u8 pad[4];
832 	struct virtchnl2_queue_reg_chunks chunks;
833 };
834 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_add_queues);
835 
836 /**
837  * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous
838  *				   interrupt vectors.
839  * @start_vector_id: Start vector id.
840  * @start_evv_id: Start EVV id.
841  * @num_vectors: Number of vectors.
842  * @pad: Padding.
843  * @dynctl_reg_start: DYN_CTL register offset.
844  * @dynctl_reg_spacing: Register spacing between DYN_CTL registers of 2
845  *			consecutive vectors.
846  * @itrn_reg_start: ITRN register offset.
847  * @itrn_reg_spacing: Register spacing between ITRN registers of 2
848  *		      consecutive vectors.
849  * @itrn_index_spacing: Register spacing between itrn registers of the same
850  *			vector where n=0..2.
851  * @pad1: Padding for future extensions.
852  *
853  * Register offsets and spacing provided by CP.
854  * Dynamic control registers are used for enabling/disabling/re-enabling
855  * interrupts and updating interrupt rates in the hotpath. Any changes
856  * to interrupt rates in the dynamic control registers will be reflected
857  * in the interrupt throttling rate registers.
858  * itrn registers are used to update interrupt rates for specific
859  * interrupt indices without modifying the state of the interrupt.
860  */
861 struct virtchnl2_vector_chunk {
862 	__le16 start_vector_id;
863 	__le16 start_evv_id;
864 	__le16 num_vectors;
865 	__le16 pad;
866 	__le32 dynctl_reg_start;
867 	__le32 dynctl_reg_spacing;
868 	__le32 itrn_reg_start;
869 	__le32 itrn_reg_spacing;
870 	__le32 itrn_index_spacing;
871 	u8 pad1[4];
872 };
873 VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
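
/* Example (illustrative sketch): locating the DYN_CTL and ITRN registers for
 * the i-th vector of a chunk. The offsets are relative to the device register
 * space; mapping that space is the driver's responsibility.
 *
 *	u32 dynctl_off = le32_to_cpu(chunk->dynctl_reg_start) +
 *			 i * le32_to_cpu(chunk->dynctl_reg_spacing);
 *	u32 itrn_off = le32_to_cpu(chunk->itrn_reg_start) +
 *		       i * le32_to_cpu(chunk->itrn_reg_spacing);
 *	// ITR index n (0..2) of the same vector:
 *	u32 itrn_n_off = itrn_off + n * le32_to_cpu(chunk->itrn_index_spacing);
 */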
874 
875 /**
876  * struct virtchnl2_vector_chunks - chunks of contiguous interrupt vectors.
877  * @num_vchunks: number of vector chunks.
878  * @pad: Padding.
879  * @vchunks: Chunks of contiguous vector info.
880  *
881  * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving
882  * away. CP performs requested action and returns status.
883  *
884  * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS.
885  */
886 struct virtchnl2_vector_chunks {
887 	__le16 num_vchunks;
888 	u8 pad[14];
889 	struct virtchnl2_vector_chunk vchunks[] __counted_by_le(num_vchunks);
890 };
891 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);
892 
893 /**
894  * struct virtchnl2_alloc_vectors - vector allocation info.
895  * @num_vectors: Number of vectors.
896  * @pad: Padding.
897  * @vchunks: Chunks of contiguous vector info.
898  *
899  * PF sends this message to request additional interrupt vectors beyond the
900  * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors
901  * structure is used to specify the number of vectors requested. CP responds
902  * with the same structure with the actual number of vectors assigned followed
903  * by virtchnl2_vector_chunks structure identifying the vector ids.
904  *
905  * Associated with VIRTCHNL2_OP_ALLOC_VECTORS.
906  */
907 struct virtchnl2_alloc_vectors {
908 	__le16 num_vectors;
909 	u8 pad[14];
910 	struct virtchnl2_vector_chunks vchunks;
911 };
912 VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_alloc_vectors);
913 
914 /**
915  * struct virtchnl2_rss_lut - RSS LUT info.
916  * @vport_id: Vport id.
917  * @lut_entries_start: Start of LUT entries.
918  * @lut_entries: Number of LUT entries.
919  * @pad: Padding.
920  * @lut: RSS lookup table.
921  *
922  * PF sends this message to get or set RSS lookup table. Only supported if
923  * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
924  * negotiation.
925  *
926  * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT.
927  */
928 struct virtchnl2_rss_lut {
929 	__le32 vport_id;
930 	__le16 lut_entries_start;
931 	__le16 lut_entries;
932 	u8 pad[4];
933 	__le32 lut[] __counted_by_le(lut_entries);
934 };
935 VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
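
/* Example (illustrative sketch): building a VIRTCHNL2_OP_SET_RSS_LUT message
 * that spreads traffic round-robin over num_rxq queues. lut_size would
 * typically come from rss_lut_size in the CREATE_VPORT response; vport_id and
 * num_rxq are caller-provided placeholders.
 *
 *	struct virtchnl2_rss_lut *rl;
 *	size_t len = struct_size(rl, lut, lut_size);
 *	u16 i;
 *
 *	rl = kzalloc(len, GFP_KERNEL);
 *	if (!rl)
 *		return -ENOMEM;
 *
 *	rl->vport_id = cpu_to_le32(vport_id);
 *	rl->lut_entries_start = cpu_to_le16(0);
 *	rl->lut_entries = cpu_to_le16(lut_size);
 *	for (i = 0; i < lut_size; i++)
 *		rl->lut[i] = cpu_to_le32(i % num_rxq);
 *	// send rl (len bytes) with VIRTCHNL2_OP_SET_RSS_LUT, then kfree(rl).
 */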
936 
937 /**
938  * struct virtchnl2_rss_hash - RSS hash info.
939  * @ptype_groups: Packet type groups bitmap.
940  * @vport_id: Vport id.
941  * @pad: Padding for future extensions.
942  *
943  * PF sends these messages to get and set the hash filter enable bits for RSS.
944  * By default, the CP sets these to all possible traffic types that the
945  * hardware supports. The PF can query this value if it wants to change the
946  * traffic types that are hashed by the hardware.
947  * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit
948  * during configuration negotiation.
949  *
950  * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH
951  */
952 struct virtchnl2_rss_hash {
953 	__le64 ptype_groups;
954 	__le32 vport_id;
955 	u8 pad[4];
956 };
957 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);
958 
959 /**
960  * struct virtchnl2_sriov_vfs_info - VFs info.
961  * @num_vfs: Number of VFs.
962  * @pad: Padding for future extensions.
963  *
964  * This message is used to set the number of SR-IOV VFs to be created. The
965  * actual allocation of resources for the VFs in terms of vports, queues and
966  * interrupts is done by CP. When this call completes, the IDPF driver calls
967  * pci_enable_sriov to let the OS instantiate the SR-IOV PCIe devices.
968  * Setting the number of VFs to 0 will destroy all the VFs of this function.
969  *
970  * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS.
971  */
972 struct virtchnl2_sriov_vfs_info {
973 	__le16 num_vfs;
974 	__le16 pad;
975 };
976 VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
977 
978 /**
979  * struct virtchnl2_ptype - Packet type info.
980  * @ptype_id_10: 10-bit packet type.
981  * @ptype_id_8: 8-bit packet type.
982  * @proto_id_count: Number of protocol ids the packet supports; a maximum of
983  *		    32 protocol ids is supported.
984  * @pad: Padding.
985  * @proto_id: proto_id_count decides the allocation of protocol id array.
986  *	      See enum virtchnl2_proto_hdr_type.
987  *
988  * Based on the descriptor type the PF supports, CP fills ptype_id_10 or
989  * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value
990  * is set to 0xFFFF, PF should consider this ptype as a dummy one and it is the
991  * last ptype.
992  */
993 struct virtchnl2_ptype {
994 	__le16 ptype_id_10;
995 	u8 ptype_id_8;
996 	u8 proto_id_count;
997 	__le16 pad;
998 	__le16 proto_id[] __counted_by(proto_id_count);
999 } __packed __aligned(2);
1000 VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
1001 
1002 /**
1003  * struct virtchnl2_get_ptype_info - Packet type info.
1004  * @start_ptype_id: Starting ptype ID.
1005  * @num_ptypes: Number of packet types from start_ptype_id.
1006  * @pad: Padding for future extensions.
1007  *
1008  * The total number of supported packet types is based on the descriptor type.
1009  * For the flex descriptor, it is 1024 (10-bit ptype), and for the base
1010  * descriptor, it is 256 (8-bit ptype). Send this message to the CP by
1011  * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the
1012  * 'start_ptype_id', 'num_ptypes', and the array of ptypes (virtchnl2_ptype)
1013  * that are added at the end of the 'virtchnl2_get_ptype_info' message (Note:
1014  * there is no specific field for the ptypes; they are appended at the end of
1015  * the ptype info message and PF/VF is expected to extract them accordingly.
1016  * The reason for doing this is that the compiler doesn't allow nested
1017  * flexible array fields).
1018  *
1019  * If all the ptypes don't fit into one mailbox buffer, CP splits the
1020  * ptype info into multiple messages, where each message will have its own
1021  * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done
1022  * updating all the ptype information extracted from the package (the number of
1023  * ptypes extracted might be less than what PF/VF expects), it will append a
1024  * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF)
1025  * to the ptype array.
1026  *
1027  * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages.
1028  *
1029  * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO.
1030  */
1031 struct virtchnl2_get_ptype_info {
1032 	__le16 start_ptype_id;
1033 	__le16 num_ptypes;
1034 	__le32 pad;
1035 };
1036 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_ptype_info);
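
/* Example (illustrative sketch): walking the virtchnl2_ptype entries appended
 * after a virtchnl2_get_ptype_info response. 'resp' is assumed to point at
 * the received message. Each entry is variable-sized (proto_id_count __le16
 * ids follow the fixed part), and a ptype_id_10 of 0xFFFF marks the final,
 * dummy entry.
 *
 *	const u8 *pos = (const u8 *)resp + sizeof(*resp);
 *	u16 i, n = le16_to_cpu(resp->num_ptypes);
 *
 *	for (i = 0; i < n; i++) {
 *		const struct virtchnl2_ptype *pt = (const void *)pos;
 *
 *		if (le16_to_cpu(pt->ptype_id_10) == 0xFFFF)
 *			break;			// dummy ptype: done
 *
 *		// pt->proto_id[0..proto_id_count-1] describe the header stack
 *		pos += struct_size(pt, proto_id, pt->proto_id_count);
 *	}
 */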
1037 
1038 /**
1039  * struct virtchnl2_vport_stats - Vport statistics.
1040  * @vport_id: Vport id.
1041  * @pad: Padding.
1042  * @rx_bytes: Received bytes.
1043  * @rx_unicast: Received unicast packets.
1044  * @rx_multicast: Received multicast packets.
1045  * @rx_broadcast: Received broadcast packets.
1046  * @rx_discards: Discarded packets on receive.
1047  * @rx_errors: Receive errors.
1048  * @rx_unknown_protocol: Unknown protocol.
1049  * @tx_bytes: Transmitted bytes.
1050  * @tx_unicast: Transmitted unicast packets.
1051  * @tx_multicast: Transmitted multicast packets.
1052  * @tx_broadcast: Transmitted broadcast packets.
1053  * @tx_discards: Discarded packets on transmit.
1054  * @tx_errors: Transmit errors.
1055  * @rx_invalid_frame_length: Packets with invalid frame length.
1056  * @rx_overflow_drop: Packets dropped on buffer overflow.
1057  *
1058  * PF/VF sends this message to CP to get the updated stats by specifying the
1059  * vport_id. CP responds with stats in struct virtchnl2_vport_stats.
1060  *
1061  * Associated with VIRTCHNL2_OP_GET_STATS.
1062  */
1063 struct virtchnl2_vport_stats {
1064 	__le32 vport_id;
1065 	u8 pad[4];
1066 	__le64 rx_bytes;
1067 	__le64 rx_unicast;
1068 	__le64 rx_multicast;
1069 	__le64 rx_broadcast;
1070 	__le64 rx_discards;
1071 	__le64 rx_errors;
1072 	__le64 rx_unknown_protocol;
1073 	__le64 tx_bytes;
1074 	__le64 tx_unicast;
1075 	__le64 tx_multicast;
1076 	__le64 tx_broadcast;
1077 	__le64 tx_discards;
1078 	__le64 tx_errors;
1079 	__le64 rx_invalid_frame_length;
1080 	__le64 rx_overflow_drop;
1081 };
1082 VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
1083 
1084 /**
1085  * struct virtchnl2_event - Event info.
1086  * @event: Event opcode. See enum virtchnl2_event_codes.
1087  * @link_speed: Link_speed provided in Mbps.
1088  * @vport_id: Vport ID.
1089  * @link_status: Link status.
1090  * @pad: Padding.
1091  * @reserved: Reserved.
1092  *
1093  * CP sends this message to inform the PF/VF driver of events that may affect
1094  * it. No direct response is expected from the driver, though it may generate
1095  * other messages in response to this one.
1096  *
1097  * Associated with VIRTCHNL2_OP_EVENT.
1098  */
1099 struct virtchnl2_event {
1100 	__le32 event;
1101 	__le32 link_speed;
1102 	__le32 vport_id;
1103 	u8 link_status;
1104 	u8 pad;
1105 	__le16 reserved;
1106 };
1107 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
1108 
1109 /**
1110  * struct virtchnl2_rss_key - RSS key info.
1111  * @vport_id: Vport id.
1112  * @key_len: Length of RSS key.
1113  * @pad: Padding.
1114  * @key_flex: RSS hash key, packed bytes.
1115  * PF/VF sends this message to get or set RSS key. Only supported if both
1116  * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
1117  * negotiation.
1118  *
1119  * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
1120  */
1121 struct virtchnl2_rss_key {
1122 	__le32 vport_id;
1123 	__le16 key_len;
1124 	u8 pad;
1125 	u8 key_flex[] __counted_by_le(key_len);
1126 } __packed;
1127 VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
1128 
1129 /**
1130  * struct virtchnl2_queue_chunk - chunk of contiguous queues
1131  * @type: See enum virtchnl2_queue_type.
1132  * @start_queue_id: Starting queue id.
1133  * @num_queues: Number of queues.
1134  * @pad: Padding for future extensions.
1135  */
1136 struct virtchnl2_queue_chunk {
1137 	__le32 type;
1138 	__le32 start_queue_id;
1139 	__le32 num_queues;
1140 	u8 pad[4];
1141 };
1142 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
1143 
1144 /* struct virtchnl2_queue_chunks - chunks of contiguous queues
1145  * @num_chunks: Number of chunks.
1146  * @pad: Padding.
1147  * @chunks: Chunks of contiguous queues info.
1148  */
1149 struct virtchnl2_queue_chunks {
1150 	__le16 num_chunks;
1151 	u8 pad[6];
1152 	struct virtchnl2_queue_chunk chunks[] __counted_by_le(num_chunks);
1153 };
1154 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
1155 
1156 /**
1157  * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info.
1158  * @vport_id: Vport id.
1159  * @pad: Padding.
1160  * @chunks: Chunks of contiguous queues info.
1161  *
1162  * PF sends these messages to enable, disable or delete queues specified in
1163  * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues
1164  * to be enabled/disabled/deleted. Also applicable to single queue receive or
1165  * transmit. CP performs requested action and returns status.
1166  *
1167  * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and
1168  * VIRTCHNL2_OP_DEL_QUEUES.
1169  */
1170 struct virtchnl2_del_ena_dis_queues {
1171 	__le32 vport_id;
1172 	u8 pad[4];
1173 	struct virtchnl2_queue_chunks chunks;
1174 };
1175 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_del_ena_dis_queues);
1176 
1177 /**
1178  * struct virtchnl2_queue_vector - Queue to vector mapping.
1179  * @queue_id: Queue id.
1180  * @vector_id: Vector id.
1181  * @pad: Padding.
1182  * @itr_idx: See enum virtchnl2_itr_idx.
1183  * @queue_type: See enum virtchnl2_queue_type.
1184  * @pad1: Padding for future extensions.
1185  */
1186 struct virtchnl2_queue_vector {
1187 	__le32 queue_id;
1188 	__le16 vector_id;
1189 	u8 pad[2];
1190 	__le32 itr_idx;
1191 	__le32 queue_type;
1192 	u8 pad1[8];
1193 };
1194 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);
1195 
1196 /**
1197  * struct virtchnl2_queue_vector_maps - Map/unmap queues info.
1198  * @vport_id: Vport id.
1199  * @num_qv_maps: Number of queue vector maps.
1200  * @pad: Padding.
1201  * @qv_maps: Queue to vector maps.
1202  *
1203  * PF sends this message to map or unmap queues to vectors and interrupt
1204  * throttling rate index registers. External data buffer contains
1205  * virtchnl2_queue_vector_maps structure that contains num_qv_maps of
1206  * virtchnl2_queue_vector structures. CP maps the requested queue vector maps
1207  * after validating the queue and vector ids and returns a status code.
1208  *
1209  * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and
1210  * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR.
1211  */
1212 struct virtchnl2_queue_vector_maps {
1213 	__le32 vport_id;
1214 	__le16 num_qv_maps;
1215 	u8 pad[10];
1216 	struct virtchnl2_queue_vector qv_maps[] __counted_by_le(num_qv_maps);
1217 };
1218 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
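
/* Example (illustrative sketch): mapping one Rx queue to a vector at ITR
 * index 0 in a VIRTCHNL2_OP_MAP_QUEUE_VECTOR message. vport_id, rxq_id and
 * vec_id are caller-provided placeholders.
 *
 *	struct virtchnl2_queue_vector_maps *qvm;
 *	size_t len = struct_size(qvm, qv_maps, 1);
 *
 *	qvm = kzalloc(len, GFP_KERNEL);
 *	if (!qvm)
 *		return -ENOMEM;
 *
 *	qvm->vport_id = cpu_to_le32(vport_id);
 *	qvm->num_qv_maps = cpu_to_le16(1);
 *	qvm->qv_maps[0].queue_id = cpu_to_le32(rxq_id);
 *	qvm->qv_maps[0].vector_id = cpu_to_le16(vec_id);
 *	qvm->qv_maps[0].itr_idx = cpu_to_le32(VIRTCHNL2_ITR_IDX_0);
 *	qvm->qv_maps[0].queue_type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
 *	// send qvm (len bytes) with VIRTCHNL2_OP_MAP_QUEUE_VECTOR, then kfree(qvm).
 */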
1219 
1220 /**
1221  * struct virtchnl2_loopback - Loopback info.
1222  * @vport_id: Vport id.
1223  * @enable: Enable/disable.
1224  * @pad: Padding for future extensions.
1225  *
1226  * PF/VF sends this message to transition to/from the loopback state. Setting
1227  * the 'enable' to 1 enables the loopback state and setting 'enable' to 0
1228  * disables it. CP configures the loopback state and returns status.
1229  *
1230  * Associated with VIRTCHNL2_OP_LOOPBACK.
1231  */
1232 struct virtchnl2_loopback {
1233 	__le32 vport_id;
1234 	u8 enable;
1235 	u8 pad[3];
1236 };
1237 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);
1238 
1239 /* struct virtchnl2_mac_addr - MAC address info.
1240  * @addr: MAC address.
1241  * @type: MAC type. See enum virtchnl2_mac_addr_type.
1242  * @pad: Padding for future extensions.
1243  */
1244 struct virtchnl2_mac_addr {
1245 	u8 addr[ETH_ALEN];
1246 	u8 type;
1247 	u8 pad;
1248 };
1249 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);
1250 
1251 /**
1252  * struct virtchnl2_mac_addr_list - List of MAC addresses.
1253  * @vport_id: Vport id.
1254  * @num_mac_addr: Number of MAC addresses.
1255  * @pad: Padding.
1256  * @mac_addr_list: List with MAC address info.
1257  *
1258  * PF/VF driver uses this structure to send a list of MAC addresses to be
1259  * added/deleted to the CP; the CP performs the action and returns the
1260  * status.
1261  *
1262  * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR.
1263  */
1264 struct virtchnl2_mac_addr_list {
1265 	__le32 vport_id;
1266 	__le16 num_mac_addr;
1267 	u8 pad[2];
1268 	struct virtchnl2_mac_addr mac_addr_list[] __counted_by_le(num_mac_addr);
1269 };
1270 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
1271 
1272 /**
1273  * struct virtchnl2_promisc_info - Promisc type info.
1274  * @vport_id: Vport id.
1275  * @flags: See enum virtchnl2_promisc_flags.
1276  * @pad: Padding for future extensions.
1277  *
1278  * PF/VF sends the vport id and flags to the CP; the CP performs the action
1279  * and returns the status.
1280  *
1281  * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE.
1282  */
1283 struct virtchnl2_promisc_info {
1284 	__le32 vport_id;
1285 	/* See enum virtchnl2_promisc_flags */
1286 	__le16 flags;
1287 	u8 pad[2];
1288 };
1289 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
1290 
1291 /**
1292  * enum virtchnl2_ptp_caps - PTP capabilities
1293  * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of
1294  *					   device clock
1295  * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of
1296  *					      device clock
1297  * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp
1298  * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp
1299  * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of
1300  *					   device clock
1301  * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of
1302  *					      device clock
1303  * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device
1304  *				      clock
1305  * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of
1306  *					 device clock
1307  * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping
1308  * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping
1309  *
1310  * PF/VF negotiates a set of supported PTP capabilities with the Control Plane.
1311  * There are two access methods - mailbox (_MB) and direct.
1312  * PTP capabilities enable Main Timer operations: get/set/adjust Main Timer,
1313  * cross timestamping and Tx timestamping.
1314  */
1315 enum virtchnl2_ptp_caps {
1316 	VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME		= BIT(0),
1317 	VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB	= BIT(1),
1318 	VIRTCHNL2_CAP_PTP_GET_CROSS_TIME		= BIT(2),
1319 	VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB		= BIT(3),
1320 	VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME		= BIT(4),
1321 	VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB	= BIT(5),
1322 	VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK		= BIT(6),
1323 	VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB		= BIT(7),
1324 	VIRTCHNL2_CAP_PTP_TX_TSTAMPS			= BIT(8),
1325 	VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB			= BIT(9),
1326 };
1327 
1328 /**
1329  * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clocks
1330  *					  registers.
1331  * @dev_clk_ns_l: Device clock low register offset
1332  * @dev_clk_ns_h: Device clock high register offset
1333  * @phy_clk_ns_l: PHY clock low register offset
1334  * @phy_clk_ns_h: PHY clock high register offset
1335  * @cmd_sync_trigger: The command sync trigger register offset
1336  * @pad: Padding for future extensions
1337  */
1338 struct virtchnl2_ptp_clk_reg_offsets {
1339 	__le32 dev_clk_ns_l;
1340 	__le32 dev_clk_ns_h;
1341 	__le32 phy_clk_ns_l;
1342 	__le32 phy_clk_ns_h;
1343 	__le32 cmd_sync_trigger;
1344 	u8 pad[4];
1345 };
1346 VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets);
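
/* Example (illustrative sketch, ordering is an assumption): composing a
 * 64-bit device clock value from the low/high register pair. 'hw_addr' is the
 * driver's mapped register space and 'offs' points at the negotiated offsets;
 * the exact latching protocol (e.g. use of cmd_sync_trigger) is device
 * specific, so the high-low-high retry below only illustrates how the offsets
 * are used.
 *
 *	u32 lo, hi, hi2;
 *	u64 dev_time_ns;
 *
 *	do {
 *		hi = readl(hw_addr + le32_to_cpu(offs->dev_clk_ns_h));
 *		lo = readl(hw_addr + le32_to_cpu(offs->dev_clk_ns_l));
 *		hi2 = readl(hw_addr + le32_to_cpu(offs->dev_clk_ns_h));
 *	} while (hi != hi2);
 *
 *	dev_time_ns = ((u64)hi << 32) | lo;
 */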
1347 
1348 /**
1349  * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross
1350  *						 time registers.
1351  * @sys_time_ns_l: System time low register offset
1352  * @sys_time_ns_h: System time high register offset
1353  * @cmd_sync_trigger: The command sync trigger register offset
1354  * @pad: Padding for future extensions
1355  */
1356 struct virtchnl2_ptp_cross_time_reg_offsets {
1357 	__le32 sys_time_ns_l;
1358 	__le32 sys_time_ns_h;
1359 	__le32 cmd_sync_trigger;
1360 	u8 pad[4];
1361 };
1362 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets);
1363 
1364 /**
1365  * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clocks
1366  *					      adjustments registers.
1367  * @dev_clk_cmd_type: Device clock command type register offset
1368  * @dev_clk_incval_l: Device clock increment value low register offset
1369  * @dev_clk_incval_h: Device clock increment value high registers offset
1370  * @dev_clk_shadj_l: Device clock shadow adjust low register offset
1371  * @dev_clk_shadj_h: Device clock shadow adjust high register offset
1372  * @phy_clk_cmd_type: PHY timer command type register offset
1373  * @phy_clk_incval_l: PHY timer increment value low register offset
1374  * @phy_clk_incval_h: PHY timer increment value high register offset
1375  * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
1376  * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
1377  */
1378 struct virtchnl2_ptp_clk_adj_reg_offsets {
1379 	__le32 dev_clk_cmd_type;
1380 	__le32 dev_clk_incval_l;
1381 	__le32 dev_clk_incval_h;
1382 	__le32 dev_clk_shadj_l;
1383 	__le32 dev_clk_shadj_h;
1384 	__le32 phy_clk_cmd_type;
1385 	__le32 phy_clk_incval_l;
1386 	__le32 phy_clk_incval_h;
1387 	__le32 phy_clk_shadj_l;
1388 	__le32 phy_clk_shadj_h;
1389 };
1390 VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
1391 
1392 /**
1393  * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
1394  *					       capabilities.
1395  * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
1396  * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
1397  * @index: Latch index provided to the Tx descriptor
1398  * @pad: Padding for future extensions
1399  */
1400 struct virtchnl2_ptp_tx_tstamp_latch_caps {
1401 	__le32 tx_latch_reg_offset_l;
1402 	__le32 tx_latch_reg_offset_h;
1403 	u8 index;
1404 	u8 pad[7];
1405 };
1406 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
1407 
1408 /**
1409  * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
1410  *						   tstamp entries.
1411  * @vport_id: Vport number
1412  * @num_latches: Total number of latches
1413  * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
1414  * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
1415  * @pad: Padding for future tstamp granularity extensions
1416  * @tstamp_latches: Capabilities of Tx timestamp entries
1417  *
1418  * PF/VF sends this message to negotiate the Tx timestamp latches for each
1419  * Vport.
1420  *
1421  * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
1422  */
1423 struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
1424 	__le32 vport_id;
1425 	__le16 num_latches;
1426 	u8 tstamp_ns_lo_bit;
1427 	u8 tstamp_ns_hi_bit;
1428 	u8 pad[8];
1429 
1430 	struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
1431 						  __counted_by_le(num_latches);
1432 };
1433 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
1434 
1435 /**
1436  * struct virtchnl2_ptp_get_caps - Get PTP capabilities
1437  * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
1438  * @max_adj: The maximum possible frequency adjustment
1439  * @base_incval: The default timer increment value
1440  * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
1441  * @peer_id: Peer ID for PTP Device Control daemon
1442  * @secondary_mbx: Indicates to the driver that it should create a secondary
1443  *		   mailbox to interact with the control plane for PTP
1444  * @pad: Padding for future extensions
1445  * @clk_offsets: Main timer and PHY registers offsets
1446  * @cross_time_offsets: Cross time registers offsets
1447  * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
1448  *
1449  * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
1450  * with supported features and fulfills appropriate structures.
1451  * If HW uses primary MBX for PTP: secondary_mbx is set to false.
1452  * If HW uses secondary MBX for PTP: secondary_mbx is set to true.
1453  *	The control plane has 2 MBX and the driver has 1 MBX; send to peer
1454  *	driver may be used to send a message using valid peer_mbx_q_id and
1455  *	peer_id.
1456  * If HW does not use send to peer driver: secondary_mbx is a don't-care field
1457  * and peer_mbx_q_id holds an invalid value (0xFFFF).
1458  *
1459  * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
1460  */
1461 struct virtchnl2_ptp_get_caps {
1462 	__le32 caps;
1463 	__le32 max_adj;
1464 	__le64 base_incval;
1465 	__le16 peer_mbx_q_id;
1466 	u8 peer_id;
1467 	u8 secondary_mbx;
1468 	u8 pad[4];
1469 
1470 	struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
1471 	struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
1472 	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
1473 };
1474 VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
1475 
1476 /**
1477  * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes tx tstamp
1478  *					  values, index and validity.
1479  * @tstamp: Timestamp value
1480  * @index: Timestamp index from which the value is read
1481  * @valid: Timestamp validity
1482  * @pad: Padding for future extensions
1483  */
1484 struct virtchnl2_ptp_tx_tstamp_latch {
1485 	__le64 tstamp;
1486 	u8 index;
1487 	u8 valid;
1488 	u8 pad[6];
1489 };
1490 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
1491 
1492 /**
1493  * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
1494  *						      associated with the vport.
1495  * @vport_id: Vport number that requests the timestamps
1496  * @num_latches: Number of latches
1497  * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
1498  * @pad: Padding for future extensions
1499  * @device_time: device time if get_devtime_with_txtstmp was set in request
1500  * @tstamp_latches: PTP TX timestamp latch
1501  *
1502  * PF/VF sends this message to receive a specified number of timestamp
1503  * entries.
1504  *
1505  * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
1506  */
1507 struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
1508 	__le32 vport_id;
1509 	__le16 num_latches;
1510 	u8 get_devtime_with_txtstmp;
1511 	u8 pad[1];
1512 	__le64 device_time;
1513 
1514 	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
1515 					     __counted_by_le(num_latches);
1516 };
1517 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
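
/* Example (illustrative sketch): consuming a
 * VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP response, where 'resp' points at the
 * received message. Only latches marked valid carry a usable timestamp;
 * report_tx_tstamp() is a hypothetical helper.
 *
 *	u16 i, n = le16_to_cpu(resp->num_latches);
 *
 *	for (i = 0; i < n; i++) {
 *		const struct virtchnl2_ptp_tx_tstamp_latch *l =
 *						&resp->tstamp_latches[i];
 *
 *		if (!l->valid)
 *			continue;
 *		// l->index matches the latch index used in the Tx descriptor
 *		report_tx_tstamp(l->index, le64_to_cpu(l->tstamp));
 *	}
 */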
1518 
1519 /**
1520  * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
1521  *					   VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
1522  * @dev_time_ns: Device clock time value in nanoseconds
1523  *
1524  * PF/VF sends this message to receive the time from the main timer.
1525  */
1526 struct virtchnl2_ptp_get_dev_clk_time {
1527 	__le64 dev_time_ns;
1528 };
1529 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
1530 
1531 /**
1532  * struct virtchnl2_ptp_get_cross_time: Associated with message
1533  *					VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
1534  * @sys_time_ns: System counter value expressed in nanoseconds, read
1535  *		 synchronously with device time
1536  * @dev_time_ns: Device clock time value expressed in nanoseconds
1537  *
1538  * PF/VF sends this message to receive the cross time.
1539  */
1540 struct virtchnl2_ptp_get_cross_time {
1541 	__le64 sys_time_ns;
1542 	__le64 dev_time_ns;
1543 };
1544 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
1545 
1546 /**
1547  * struct virtchnl2_ptp_set_dev_clk_time: Associated with message
1548  *					  VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
1549  * @dev_time_ns: Device time value expressed in nanoseconds to set
1550  *
1551  * PF/VF sends this message to set the time of the main timer.
1552  */
1553 struct virtchnl2_ptp_set_dev_clk_time {
1554 	__le64 dev_time_ns;
1555 };
1556 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
1557 
1558 /**
1559  * struct virtchnl2_ptp_adj_dev_clk_fine: Associated with message
1560  *					  VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
1561  * @incval: Source timer increment value per clock cycle
1562  *
1563  * PF/VF sends this message to adjust the frequency of the main timer by the
1564  * indicated increment value.
1565  */
1566 struct virtchnl2_ptp_adj_dev_clk_fine {
1567 	__le64 incval;
1568 };
1569 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine);
1570 
1571 /**
1572  * struct virtchnl2_ptp_adj_dev_clk_time: Associated with message
1573  *					  VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME.
1574  * @delta: Offset in nanoseconds to adjust the time by
1575  *
1576  * PF/VF sends this message to adjust the time of the main timer by the delta.
1577  */
1578 struct virtchnl2_ptp_adj_dev_clk_time {
1579 	__le64 delta;
1580 };
1581 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
1582 
1583 #endif /* _VIRTCHNL2_H_ */
1584