xref: /linux/drivers/net/ethernet/google/gve/gve_adminq.h (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #ifndef _GVE_ADMINQ_H
8 #define _GVE_ADMINQ_H
9 
10 #include <linux/build_bug.h>
11 
/* Admin queue opcodes. One value per device command; written into the
 * opcode field of a union gve_adminq_command slot.
 */
enum gve_adminq_opcodes {
	GVE_ADMINQ_DESCRIBE_DEVICE		= 0x1,
	GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES	= 0x2,
	GVE_ADMINQ_REGISTER_PAGE_LIST		= 0x3,
	GVE_ADMINQ_UNREGISTER_PAGE_LIST		= 0x4,
	GVE_ADMINQ_CREATE_TX_QUEUE		= 0x5,
	GVE_ADMINQ_CREATE_RX_QUEUE		= 0x6,
	GVE_ADMINQ_DESTROY_TX_QUEUE		= 0x7,
	GVE_ADMINQ_DESTROY_RX_QUEUE		= 0x8,
	GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES	= 0x9,
	GVE_ADMINQ_CONFIGURE_RSS		= 0xA,
	GVE_ADMINQ_SET_DRIVER_PARAMETER		= 0xB,
	GVE_ADMINQ_REPORT_STATS			= 0xC,
	GVE_ADMINQ_REPORT_LINK_SPEED		= 0xD,
	GVE_ADMINQ_GET_PTYPE_MAP		= 0xE,
	GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY	= 0xF,
	GVE_ADMINQ_QUERY_FLOW_RULES		= 0x10,
	GVE_ADMINQ_REPORT_NIC_TIMESTAMP		= 0x11,
	GVE_ADMINQ_QUERY_RSS			= 0x12,

	/* For commands that are larger than 56 bytes */
	GVE_ADMINQ_EXTENDED_COMMAND		= 0xFF,
};
36 
/* The normal adminq command is restricted to be 56 bytes at maximum. For the
 * longer adminq command, it is wrapped by GVE_ADMINQ_EXTENDED_COMMAND with
 * inner opcode of gve_adminq_extended_cmd_opcodes specified. The inner command
 * is written in the dma memory allocated by GVE_ADMINQ_EXTENDED_COMMAND.
 *
 * Inner opcodes start at 0x101, above the 0xFF ceiling of the outer
 * gve_adminq_opcodes, so the two value spaces cannot collide.
 */
enum gve_adminq_extended_cmd_opcodes {
	GVE_ADMINQ_CONFIGURE_FLOW_RULE	= 0x101,
};
45 
/* Admin queue status codes, reported in the status field of a command slot.
 * 0x0 means the device has not yet completed the command; error codes occupy
 * the top of the 32-bit space (0xFFFFFFF0..0xFFFFFFFF).
 * NOTE(review): the error names mirror canonical gRPC/absl status codes —
 * presumed intentional, not verified here.
 */
enum gve_adminq_statuses {
	GVE_ADMINQ_COMMAND_UNSET			= 0x0,
	GVE_ADMINQ_COMMAND_PASSED			= 0x1,
	GVE_ADMINQ_COMMAND_ERROR_ABORTED		= 0xFFFFFFF0,
	GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS		= 0xFFFFFFF1,
	GVE_ADMINQ_COMMAND_ERROR_CANCELLED		= 0xFFFFFFF2,
	GVE_ADMINQ_COMMAND_ERROR_DATALOSS		= 0xFFFFFFF3,
	GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED	= 0xFFFFFFF4,
	GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION	= 0xFFFFFFF5,
	GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR		= 0xFFFFFFF6,
	GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT	= 0xFFFFFFF7,
	GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND		= 0xFFFFFFF8,
	GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE		= 0xFFFFFFF9,
	GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED	= 0xFFFFFFFA,
	GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED	= 0xFFFFFFFB,
	GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED	= 0xFFFFFFFC,
	GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE		= 0xFFFFFFFD,
	GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED		= 0xFFFFFFFE,
	GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR		= 0xFFFFFFFF,
};
67 
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1

/* All AdminQ command structs should be naturally packed. The static_assert
 * calls make sure this is the case at compile time.
 */

/* DESCRIBE_DEVICE command payload: hands the device a host buffer for a
 * struct gve_device_descriptor.
 */
struct gve_adminq_describe_device {
	__be64 device_descriptor_addr;		/* DMA address of the descriptor buffer */
	__be32 device_descriptor_version;	/* GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION */
	__be32 available_length;		/* buffer size in bytes */
};

static_assert(sizeof(struct gve_adminq_describe_device) == 16);
81 
/* Device capabilities/defaults returned via GVE_ADMINQ_DESCRIBE_DEVICE.
 * NOTE(review): the num_device_options/total_length fields imply that
 * variable-length struct gve_device_option entries follow this header in
 * the same buffer — confirm against the parsing code in gve_adminq.c.
 */
struct gve_device_descriptor {
	__be64 max_registered_pages;
	__be16 reserved1;
	__be16 tx_queue_entries;	/* default TX ring size */
	__be16 rx_queue_entries;	/* default RX ring size */
	__be16 default_num_queues;
	__be16 mtu;
	__be16 counters;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
	u8  mac[ETH_ALEN];
	__be16 num_device_options;
	__be16 total_length;
	u8  reserved2[6];
};

static_assert(sizeof(struct gve_device_descriptor) == 40);

/* Common header of each device option; the per-option payload of
 * option_length bytes follows.
 */
struct gve_device_option {
	__be16 option_id;		/* enum gve_dev_opt_id */
	__be16 option_length;
	__be32 required_features_mask;
};

static_assert(sizeof(struct gve_device_option) == 8);
107 
/* Payloads for the queue-format device options (see enum gve_dev_opt_id). */

struct gve_device_option_gqi_rda {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);

struct gve_device_option_gqi_qpl {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);

struct gve_device_option_dqo_rda {
	__be32 supported_features_mask;
	__be32 reserved;
};

static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);

struct gve_device_option_dqo_qpl {
	__be32 supported_features_mask;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
};

static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8);
134 
/* Jumbo-frames option: advertises the largest MTU the device supports. */
struct gve_device_option_jumbo_frames {
	__be32 supported_features_mask;
	__be16 max_mtu;
	u8 padding[2];
};

static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);

struct gve_device_option_buffer_sizes {
	/* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
	__be32 supported_features_mask;
	__be16 packet_buffer_size;
	__be16 header_buffer_size;
};

static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);

/* Modify-ring option: device-imposed min/max ring sizes. */
struct gve_device_option_modify_ring {
	/* NOTE(review): "featured" looks like a typo for "features", but
	 * renaming the field would break the other source files that
	 * reference it, so it is documented rather than changed here.
	 */
	__be32 supported_featured_mask;
	__be16 max_rx_ring_size;
	__be16 max_tx_ring_size;
	__be16 min_rx_ring_size;
	__be16 min_tx_ring_size;
};

static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
161 
/* Flow-steering option: advertises how many flow rules the device accepts. */
struct gve_device_option_flow_steering {
	__be32 supported_features_mask;
	__be32 reserved;
	__be32 max_flow_rules;
};

static_assert(sizeof(struct gve_device_option_flow_steering) == 12);

/* RSS-config option: advertises hash key and indirection-table sizes. */
struct gve_device_option_rss_config {
	__be32 supported_features_mask;
	__be16 hash_key_size;
	__be16 hash_lut_size;
};

static_assert(sizeof(struct gve_device_option_rss_config) == 8);

struct gve_device_option_nic_timestamp {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_nic_timestamp) == 4);
183 
/* Terminology:
 *
 * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
 *       mapped and read/updated by the device.
 *
 * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
 *       the device for read/write and data is copied from/to SKBs.
 */

/* option_id values for struct gve_device_option.
 * NOTE(review): 0x5, 0x9 and 0xc are absent — presumably reserved or
 * retired option IDs; confirm against the device spec before reusing.
 */
enum gve_dev_opt_id {
	GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING	= 0x1,
	GVE_DEV_OPT_ID_GQI_RDA			= 0x2,
	GVE_DEV_OPT_ID_GQI_QPL			= 0x3,
	GVE_DEV_OPT_ID_DQO_RDA			= 0x4,
	GVE_DEV_OPT_ID_MODIFY_RING		= 0x6,
	GVE_DEV_OPT_ID_DQO_QPL			= 0x7,
	GVE_DEV_OPT_ID_JUMBO_FRAMES		= 0x8,
	GVE_DEV_OPT_ID_BUFFER_SIZES		= 0xa,
	GVE_DEV_OPT_ID_FLOW_STEERING		= 0xb,
	GVE_DEV_OPT_ID_NIC_TIMESTAMP		= 0xd,
	GVE_DEV_OPT_ID_RSS_CONFIG		= 0xe,
};
205 
/* Expected required_features_mask value per option; currently no option
 * requires any driver feature (all zero).
 */
enum gve_dev_opt_req_feat_mask {
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING	= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP		= 0x0,
};

/* Bits used in an option's supported_features_mask.
 * NOTE(review): bits 1, 3 and 6 are skipped — presumably reserved.
 */
enum gve_sup_feature_mask {
	GVE_SUP_MODIFY_RING_MASK	= 1 << 0,
	GVE_SUP_JUMBO_FRAMES_MASK	= 1 << 2,
	GVE_SUP_BUFFER_SIZES_MASK	= 1 << 4,
	GVE_SUP_FLOW_STEERING_MASK	= 1 << 5,
	GVE_SUP_RSS_CONFIG_MASK		= 1 << 7,
	GVE_SUP_NIC_TIMESTAMP_MASK	= 1 << 8,
};
228 
/* The GQI raw-addressing option carries no payload. */
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0

#define GVE_VERSION_STR_LEN 128

/* Bit positions within gve_driver_info.driver_capability_flags[] (a
 * 4 x 64-bit array, 256 bits total).
 * NOTE(review): "capbility" is a typo for "capability" in the enum tag;
 * left as-is since other files may spell it the same way.
 */
enum gve_driver_capbility {
	gve_driver_capability_gqi_qpl = 0,
	gve_driver_capability_gqi_rda = 1,
	gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
	gve_driver_capability_dqo_rda = 3,
	gve_driver_capability_alt_miss_compl = 4,
	gve_driver_capability_flexible_buffer_size = 5,
	gve_driver_capability_flexible_rss_size = 6,
};
242 
/* Map a gve_driver_capbility bit index onto the corresponding 64-bit
 * word of driver_capability_flags[]: GVE_CAP1 covers bits 0-63,
 * GVE_CAP2 bits 64-127, GVE_CAP3 bits 128-191, GVE_CAP4 bits 192-255.
 * The argument is fully parenthesized so expression arguments (e.g.
 * "base + idx") are cast and offset as a whole.
 */
#define GVE_CAP1(a) BIT((int)(a))
#define GVE_CAP2(a) BIT(((int)(a)) - 64)
#define GVE_CAP3(a) BIT(((int)(a)) - 128)
#define GVE_CAP4(a) BIT(((int)(a)) - 192)
247 
/* Capability words advertised to the device in gve_driver_info. Only the
 * first 64-bit word carries bits today; words 2-4 are zero.
 */
#define GVE_DRIVER_CAPABILITY_FLAGS1 \
	(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
	 GVE_CAP1(gve_driver_capability_gqi_rda) | \
	 GVE_CAP1(gve_driver_capability_dqo_rda) | \
	 GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
	 GVE_CAP1(gve_driver_capability_flexible_buffer_size) | \
	 GVE_CAP1(gve_driver_capability_flexible_rss_size))

#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
259 
/* Outer payload of GVE_ADMINQ_EXTENDED_COMMAND: points at DMA memory
 * holding the inner command (see gve_adminq_extended_cmd_opcodes).
 */
struct gve_adminq_extended_command {
	__be32 inner_opcode;		/* enum gve_adminq_extended_cmd_opcodes */
	__be32 inner_length;		/* length of the inner command in bytes */
	__be64 inner_command_addr;	/* DMA address of the inner command */
};

static_assert(sizeof(struct gve_adminq_extended_command) == 16);
267 
/* Driver identity blob passed to the device by
 * GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY.
 * NOTE(review): unlike the other shared structs in this file it has no
 * static_assert; its naturally-packed size works out to 304 bytes —
 * consider adding an assert to match the file's convention.
 */
struct gve_driver_info {
	u8 os_type;	/* 0x01 = Linux */
	u8 driver_major;
	u8 driver_minor;
	u8 driver_sub;
	__be32 os_version_major;
	__be32 os_version_minor;
	__be32 os_version_sub;
	__be64 driver_capability_flags[4];	/* GVE_DRIVER_CAPABILITY_FLAGS1..4 */
	u8 os_version_str1[GVE_VERSION_STR_LEN];
	u8 os_version_str2[GVE_VERSION_STR_LEN];
};

/* Command payload: DMA location/length of a struct gve_driver_info. */
struct gve_adminq_verify_driver_compatibility {
	__be64 driver_info_len;
	__be64 driver_info_addr;
};

static_assert(sizeof(struct gve_adminq_verify_driver_compatibility) == 16);
287 
/* CONFIGURE_DEVICE_RESOURCES: hands the device the counter array and
 * IRQ doorbell layout.
 */
struct gve_adminq_configure_device_resources {
	__be64 counter_array;		/* DMA address of the event counter array */
	__be64 irq_db_addr;		/* DMA address of the IRQ doorbell array */
	__be32 num_counters;
	__be32 num_irq_dbs;
	__be32 irq_db_stride;		/* bytes between consecutive doorbells */
	__be32 ntfy_blk_msix_base_idx;
	u8 queue_format;
	u8 padding[7];
};

static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);

/* REGISTER_PAGE_LIST: registers a queue page list (QPL) with the device. */
struct gve_adminq_register_page_list {
	__be32 page_list_id;
	__be32 num_pages;
	__be64 page_address_list_addr;	/* DMA address of the page-address array */
	__be64 page_size;
};

static_assert(sizeof(struct gve_adminq_register_page_list) == 24);

struct gve_adminq_unregister_page_list {
	__be32 page_list_id;
};

static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);

/* Sentinel queue_page_list_id meaning "no QPL, raw DMA addressing". */
#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF
317 
/* CREATE_TX_QUEUE command payload. */
struct gve_adminq_create_tx_queue {
	__be32 queue_id;
	__be32 reserved;
	__be64 queue_resources_addr;	/* DMA address of struct gve_queue_resources */
	__be64 tx_ring_addr;
	__be32 queue_page_list_id;	/* or GVE_RAW_ADDRESSING_QPL_ID */
	__be32 ntfy_id;
	__be64 tx_comp_ring_addr;
	__be16 tx_ring_size;
	__be16 tx_comp_ring_size;
	u8 padding[4];
};

static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);

/* CREATE_RX_QUEUE command payload. */
struct gve_adminq_create_rx_queue {
	__be32 queue_id;
	__be32 index;
	__be32 reserved;
	__be32 ntfy_id;
	__be64 queue_resources_addr;	/* DMA address of struct gve_queue_resources */
	__be64 rx_desc_ring_addr;
	__be64 rx_data_ring_addr;
	__be32 queue_page_list_id;	/* or GVE_RAW_ADDRESSING_QPL_ID */
	__be16 rx_ring_size;
	__be16 packet_buffer_size;
	__be16 rx_buff_ring_size;
	u8 enable_rsc;
	u8 padding1;
	__be16 header_buffer_size;
	u8 padding2[2];
};

static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
352 
/* Queue resources that are shared with the device. Padded out to a full
 * 64 bytes by the union.
 */
struct gve_queue_resources {
	union {
		struct {
			__be32 db_index;	/* Device -> Guest */
			__be32 counter_index;	/* Device -> Guest */
		};
		u8 reserved[64];
	};
};

static_assert(sizeof(struct gve_queue_resources) == 64);

struct gve_adminq_destroy_tx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_tx_queue) == 4);

struct gve_adminq_destroy_rx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_rx_queue) == 4);
377 
/* GVE Set Driver Parameter Types */
enum gve_set_driver_param_types {
	GVE_SET_PARAM_MTU	= 0x1,
};

/* SET_DRIVER_PARAMETER: generic type/value parameter update. */
struct gve_adminq_set_driver_parameter {
	__be32 parameter_type;		/* enum gve_set_driver_param_types */
	u8 reserved[4];
	__be64 parameter_value;
};

static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);

/* REPORT_STATS: points the device at a struct gve_stats_report buffer. */
struct gve_adminq_report_stats {
	__be64 stats_report_len;
	__be64 stats_report_addr;	/* DMA address of the stats report */
	__be64 interval;
};

static_assert(sizeof(struct gve_adminq_report_stats) == 24);

/* REPORT_LINK_SPEED: DMA address for the device to report link speed into. */
struct gve_adminq_report_link_speed {
	__be64 link_speed_address;
};

static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
404 
/* REPORT_NIC_TIMESTAMP: points the device at a gve_nic_ts_report buffer. */
struct gve_adminq_report_nic_ts {
	__be64 nic_ts_report_len;
	__be64 nic_ts_report_addr;	/* DMA address of struct gve_nic_ts_report */
};

static_assert(sizeof(struct gve_adminq_report_nic_ts) == 16);

/* NOTE(review): no static_assert here, unlike sibling shared structs;
 * naturally-packed size is 40 bytes — consider asserting it.
 */
struct gve_nic_ts_report {
	__be64 nic_timestamp; /* NIC clock in nanoseconds */
	__be64 reserved1;
	__be64 reserved2;
	__be64 reserved3;
	__be64 reserved4;
};

/* One entry of a stats report (see gve_stat_names for stat_name values). */
struct stats {
	__be32 stat_name;
	__be32 queue_id;
	__be64 value;
};

static_assert(sizeof(struct stats) == 16);

/* Header of the stats report buffer; written_count entries follow. */
struct gve_stats_report {
	__be64 written_count;
	struct stats stats[];
};

static_assert(sizeof(struct gve_stats_report) == 8);
434 
/* stat_name values for struct stats. Driver-generated stats occupy the
 * low range; NIC-generated stats start at 65.
 */
enum gve_stat_names {
	// stats from gve
	TX_WAKE_CNT			= 1,
	TX_STOP_CNT			= 2,
	TX_FRAMES_SENT			= 3,
	TX_BYTES_SENT			= 4,
	TX_LAST_COMPLETION_PROCESSED	= 5,
	RX_NEXT_EXPECTED_SEQUENCE	= 6,
	RX_BUFFERS_POSTED		= 7,
	TX_TIMEOUT_CNT			= 8,
	// stats from NIC
	RX_QUEUE_DROP_CNT		= 65,
	RX_NO_BUFFERS_POSTED		= 66,
	RX_DROPS_PACKET_OVER_MRU	= 67,
	RX_DROPS_INVALID_CHECKSUM	= 68,
};
451 
enum gve_l3_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L3_TYPE_UNKNOWN = 0,
	GVE_L3_TYPE_OTHER,
	GVE_L3_TYPE_IPV4,
	GVE_L3_TYPE_IPV6,
};

enum gve_l4_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L4_TYPE_UNKNOWN = 0,
	GVE_L4_TYPE_OTHER,
	GVE_L4_TYPE_TCP,
	GVE_L4_TYPE_UDP,
	GVE_L4_TYPE_ICMP,
	GVE_L4_TYPE_SCTP,
};

/* These are control path types for PTYPE which are the same as the data path
 * types.
 */
struct gve_ptype_entry {
	u8 l3_type;	/* enum gve_l3_type */
	u8 l4_type;	/* enum gve_l4_type */
};

struct gve_ptype_map {
	struct gve_ptype_entry ptypes[GVE_NUM_PTYPES]; /* PTYPES are always 10 bits. */
};

/* GET_PTYPE_MAP: DMA buffer for the device to fill with a gve_ptype_map. */
struct gve_adminq_get_ptype_map {
	__be64 ptype_map_len;
	__be64 ptype_map_addr;
};
486 
/* Flow-steering related definitions */

/* Inner opcodes for GVE_ADMINQ_CONFIGURE_FLOW_RULE. */
enum gve_adminq_flow_rule_cfg_opcode {
	GVE_FLOW_RULE_CFG_ADD	= 0,
	GVE_FLOW_RULE_CFG_DEL	= 1,
	GVE_FLOW_RULE_CFG_RESET	= 2,
};

/* Sub-opcodes for GVE_ADMINQ_QUERY_FLOW_RULES. */
enum gve_adminq_flow_rule_query_opcode {
	GVE_FLOW_RULE_QUERY_RULES	= 0,
	GVE_FLOW_RULE_QUERY_IDS		= 1,
	GVE_FLOW_RULE_QUERY_STATS	= 2,
};

/* Flow classification types a rule can match on. */
enum gve_adminq_flow_type {
	GVE_FLOW_TYPE_TCPV4,
	GVE_FLOW_TYPE_UDPV4,
	GVE_FLOW_TYPE_SCTPV4,
	GVE_FLOW_TYPE_AHV4,
	GVE_FLOW_TYPE_ESPV4,
	GVE_FLOW_TYPE_TCPV6,
	GVE_FLOW_TYPE_UDPV6,
	GVE_FLOW_TYPE_SCTPV6,
	GVE_FLOW_TYPE_AHV6,
	GVE_FLOW_TYPE_ESPV6,
};
512 
/* Flow-steering command */
struct gve_adminq_flow_rule {
	__be16 flow_type;	/* enum gve_adminq_flow_type */
	__be16 action; /* RX queue id */
	struct gve_flow_spec key;	/* values to match (defined elsewhere) */
	struct gve_flow_spec mask;	/* which bits of key are significant */
};

/* Inner command for GVE_ADMINQ_CONFIGURE_FLOW_RULE (extended command). */
struct gve_adminq_configure_flow_rule {
	__be16 opcode;		/* enum gve_adminq_flow_rule_cfg_opcode */
	u8 padding[2];
	struct gve_adminq_flow_rule rule;
	__be32 location;	/* rule slot to add to / delete from */
};

static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 92);

/* Header the device writes at the start of the query-rules buffer. */
struct gve_query_flow_rules_descriptor {
	__be32 num_flow_rules;
	__be32 max_flow_rules;
	__be32 num_queried_rules;
	__be32 total_length;
};

struct gve_adminq_queried_flow_rule {
	__be32 location;
	struct gve_adminq_flow_rule flow_rule;
};

/* QUERY_FLOW_RULES command payload. */
struct gve_adminq_query_flow_rules {
	__be16 opcode;		/* enum gve_adminq_flow_rule_query_opcode */
	u8 padding[2];
	__be32 starting_rule_id;
	__be64 available_length; /* The dma memory length that the driver allocated */
	__be64 rule_descriptor_addr; /* The dma memory address */
};

static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);
551 
/* Bit positions for the hash_types field of the RSS commands below. */
enum gve_rss_hash_type {
	GVE_RSS_HASH_IPV4,
	GVE_RSS_HASH_TCPV4,
	GVE_RSS_HASH_IPV6,
	GVE_RSS_HASH_IPV6_EX,
	GVE_RSS_HASH_TCPV6,
	GVE_RSS_HASH_TCPV6_EX,
	GVE_RSS_HASH_UDPV4,
	GVE_RSS_HASH_UDPV6,
	GVE_RSS_HASH_UDPV6_EX,
};

/* CONFIGURE_RSS: program hash types, algorithm, key and indirection table. */
struct gve_adminq_configure_rss {
	__be16 hash_types;	/* bitmask of enum gve_rss_hash_type bits */
	u8 hash_alg;
	u8 reserved;
	__be16 hash_key_size;
	__be16 hash_lut_size;
	__be64 hash_key_addr;	/* DMA address of the hash key */
	__be64 hash_lut_addr;	/* DMA address of the indirection table */
};

static_assert(sizeof(struct gve_adminq_configure_rss) == 24);

/* Header the device writes at the start of the query-RSS buffer. */
struct gve_query_rss_descriptor {
	__be32 total_length;
	__be16 hash_types;
	u8 hash_alg;
	u8 reserved;
};

/* QUERY_RSS command payload. */
struct gve_adminq_query_rss {
	__be64 available_length;	/* size of the buffer the driver allocated */
	__be64 rss_descriptor_addr;	/* DMA address of that buffer */
};

static_assert(sizeof(struct gve_adminq_query_rss) == 16);
589 
/* One 64-byte admin queue command slot. opcode/status occupy the first
 * 8 bytes, which is why every per-command payload above is limited to
 * 56 bytes (larger commands go through gve_adminq_extended_command).
 */
union gve_adminq_command {
	struct {
		__be32 opcode;	/* enum gve_adminq_opcodes */
		__be32 status;	/* enum gve_adminq_statuses; presumably device-written — confirm */
		union {
			struct gve_adminq_configure_device_resources
						configure_device_resources;
			struct gve_adminq_create_tx_queue create_tx_queue;
			struct gve_adminq_create_rx_queue create_rx_queue;
			struct gve_adminq_destroy_tx_queue destroy_tx_queue;
			struct gve_adminq_destroy_rx_queue destroy_rx_queue;
			struct gve_adminq_describe_device describe_device;
			struct gve_adminq_register_page_list reg_page_list;
			struct gve_adminq_unregister_page_list unreg_page_list;
			struct gve_adminq_set_driver_parameter set_driver_param;
			struct gve_adminq_report_stats report_stats;
			struct gve_adminq_report_link_speed report_link_speed;
			struct gve_adminq_get_ptype_map get_ptype_map;
			struct gve_adminq_verify_driver_compatibility
						verify_driver_compatibility;
			struct gve_adminq_query_flow_rules query_flow_rules;
			struct gve_adminq_configure_rss configure_rss;
			struct gve_adminq_query_rss query_rss;
			struct gve_adminq_report_nic_ts report_nic_ts;
			struct gve_adminq_extended_command extended_command;
		};
	};
	u8 reserved[64];	/* pads the slot to exactly 64 bytes */
};

static_assert(sizeof(union gve_adminq_command) == 64);
621 
/* Admin queue lifecycle */
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);

/* Device description and resource configuration */
int gve_adminq_describe_device(struct gve_priv *priv);
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);

/* Queue create/destroy */
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
/* NOTE(review): parameter named queue_id, but the tx counterpart takes a
 * num_queues count — likely a misnomer; confirm against gve_adminq.c.
 */
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);

/* Queue page lists */
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);

/* Stats, link speed and driver compatibility */
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);

/* Flow steering */
int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc);
int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);

/* RSS */
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);

/* NIC timestamp reporting */
int gve_adminq_report_nic_ts(struct gve_priv *priv,
			     dma_addr_t nic_ts_report_addr);

struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut);
659 
660 #endif /* _GVE_ADMINQ_H */
661