/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_ADMINQ_H
#define _GVE_ADMINQ_H

#include <asm/byteorder.h>	/* cpu_to_be{16,32,64}(), be{16,32}_to_cpu() */
#include <linux/bits.h>		/* BIT() */
#include <linux/build_bug.h>
#include <linux/errno.h>
#include <linux/if_ether.h>	/* ETH_ALEN */
#include <linux/string.h>
#include <linux/types.h>	/* u8, __be{16,32,64}, dma_addr_t */

/* struct gve_flow_spec, used by the flow-steering commands below, is
 * defined in "gve.h".
 */

/* Admin queue opcodes */
enum gve_adminq_opcodes {
	GVE_ADMINQ_DESCRIBE_DEVICE		= 0x1,
	GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES	= 0x2,
	GVE_ADMINQ_REGISTER_PAGE_LIST		= 0x3,
	GVE_ADMINQ_UNREGISTER_PAGE_LIST		= 0x4,
	GVE_ADMINQ_CREATE_TX_QUEUE		= 0x5,
	GVE_ADMINQ_CREATE_RX_QUEUE		= 0x6,
	GVE_ADMINQ_DESTROY_TX_QUEUE		= 0x7,
	GVE_ADMINQ_DESTROY_RX_QUEUE		= 0x8,
	GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES	= 0x9,
	GVE_ADMINQ_SET_DRIVER_PARAMETER		= 0xB,
	GVE_ADMINQ_REPORT_STATS			= 0xC,
	GVE_ADMINQ_REPORT_LINK_SPEED		= 0xD,
	GVE_ADMINQ_GET_PTYPE_MAP		= 0xE,
	GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY	= 0xF,
	GVE_ADMINQ_QUERY_FLOW_RULES		= 0x10,

	/* For commands that are larger than 56 bytes */
	GVE_ADMINQ_EXTENDED_COMMAND		= 0xFF,
};

/* A normal adminq command is limited to 56 bytes: the 64-byte command slot
 * minus the 8 bytes taken by the opcode and status fields. A longer command
 * is wrapped in GVE_ADMINQ_EXTENDED_COMMAND, with its inner opcode taken
 * from gve_adminq_extended_cmd_opcodes; the inner command itself is written
 * to DMA memory allocated by the driver and referenced by the wrapper.
 */
enum gve_adminq_extended_cmd_opcodes {
	GVE_ADMINQ_CONFIGURE_FLOW_RULE	= 0x101,
};

/* Admin queue status codes */
enum gve_adminq_statuses {
	GVE_ADMINQ_COMMAND_UNSET			= 0x0,
	GVE_ADMINQ_COMMAND_PASSED			= 0x1,
	GVE_ADMINQ_COMMAND_ERROR_ABORTED		= 0xFFFFFFF0,
	GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS		= 0xFFFFFFF1,
	GVE_ADMINQ_COMMAND_ERROR_CANCELLED		= 0xFFFFFFF2,
	GVE_ADMINQ_COMMAND_ERROR_DATALOSS		= 0xFFFFFFF3,
	GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED	= 0xFFFFFFF4,
	GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION	= 0xFFFFFFF5,
	GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR		= 0xFFFFFFF6,
	GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT	= 0xFFFFFFF7,
	GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND		= 0xFFFFFFF8,
	GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE		= 0xFFFFFFF9,
	GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED	= 0xFFFFFFFA,
	GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED	= 0xFFFFFFFB,
	GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED	= 0xFFFFFFFC,
	GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE		= 0xFFFFFFFD,
	GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED		= 0xFFFFFFFE,
	GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR		= 0xFFFFFFFF,
};
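
/* A minimal illustrative helper, not part of the driver API: one plausible
 * mapping from adminq status to a kernel errno. The authoritative
 * translation lives in gve_adminq.c; the subset below is an assumption.
 */
static inline int gve_example_status_to_errno(u32 status)
{
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}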

#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1

/* All AdminQ command structs should be naturally packed. The static_assert
 * calls make sure this is the case at compile time.
 */

struct gve_adminq_describe_device {
	__be64 device_descriptor_addr;
	__be32 device_descriptor_version;
	__be32 available_length;
};

static_assert(sizeof(struct gve_adminq_describe_device) == 16);

struct gve_device_descriptor {
	__be64 max_registered_pages;
	__be16 reserved1;
	__be16 tx_queue_entries;
	__be16 rx_queue_entries;
	__be16 default_num_queues;
	__be16 mtu;
	__be16 counters;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
	u8  mac[ETH_ALEN];
	__be16 num_device_options;
	__be16 total_length;
	u8  reserved2[6];
};

static_assert(sizeof(struct gve_device_descriptor) == 40);

struct gve_device_option {
	__be16 option_id;
	__be16 option_length;
	__be32 required_features_mask;
};

static_assert(sizeof(struct gve_device_option) == 8);
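
/* Device options trail struct gve_device_descriptor as a TLV list:
 * num_device_options entries, each a struct gve_device_option header
 * followed by option_length bytes of payload. A sketch of walking the
 * list (helper name illustrative; assumes the descriptor was already
 * validated against total_length):
 */
static inline struct gve_device_option *
gve_example_next_option(struct gve_device_descriptor *desc,
			struct gve_device_option *opt)
{
	void *next = (void *)(opt + 1) + be16_to_cpu(opt->option_length);
	void *end = (void *)desc + be16_to_cpu(desc->total_length);

	/* Stop once the next header would run past the descriptor. */
	return next + sizeof(*opt) > end ? NULL : next;
}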

struct gve_device_option_gqi_rda {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);

struct gve_device_option_gqi_qpl {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);

struct gve_device_option_dqo_rda {
	__be32 supported_features_mask;
	__be32 reserved;
};

static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);

struct gve_device_option_dqo_qpl {
	__be32 supported_features_mask;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
};

static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8);

struct gve_device_option_jumbo_frames {
	__be32 supported_features_mask;
	__be16 max_mtu;
	u8 padding[2];
};

static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);

struct gve_device_option_buffer_sizes {
	/* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
	__be32 supported_features_mask;
	__be16 packet_buffer_size;
	__be16 header_buffer_size;
};

static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);

struct gve_device_option_modify_ring {
	__be32 supported_features_mask;
	__be16 max_rx_ring_size;
	__be16 max_tx_ring_size;
	__be16 min_rx_ring_size;
	__be16 min_tx_ring_size;
};

static_assert(sizeof(struct gve_device_option_modify_ring) == 12);

struct gve_device_option_flow_steering {
	__be32 supported_features_mask;
	__be32 reserved;
	__be32 max_flow_rules;
};

static_assert(sizeof(struct gve_device_option_flow_steering) == 12);

/* Terminology:
 *
 * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
 *       mapped and read/updated by the device.
 *
 * QPL - Queue Page Lists - The driver uses bounce buffers that are DMA mapped
 *       with the device for read/write, and data is copied to/from SKBs.
 */
enum gve_dev_opt_id {
	GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING	= 0x1,
	GVE_DEV_OPT_ID_GQI_RDA			= 0x2,
	GVE_DEV_OPT_ID_GQI_QPL			= 0x3,
	GVE_DEV_OPT_ID_DQO_RDA			= 0x4,
	GVE_DEV_OPT_ID_MODIFY_RING		= 0x6,
	GVE_DEV_OPT_ID_DQO_QPL			= 0x7,
	GVE_DEV_OPT_ID_JUMBO_FRAMES		= 0x8,
	GVE_DEV_OPT_ID_BUFFER_SIZES		= 0xa,
	GVE_DEV_OPT_ID_FLOW_STEERING		= 0xb,
};

enum gve_dev_opt_req_feat_mask {
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING	= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING		= 0x0,
};

enum gve_sup_feature_mask {
	GVE_SUP_MODIFY_RING_MASK	= 1 << 0,
	GVE_SUP_JUMBO_FRAMES_MASK	= 1 << 2,
	GVE_SUP_BUFFER_SIZES_MASK	= 1 << 4,
	GVE_SUP_FLOW_STEERING_MASK	= 1 << 5,
};
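
/* A sketch (helper name illustrative): per the comment on
 * struct gve_device_option_buffer_sizes, that option is only meaningful
 * when GVE_SUP_BUFFER_SIZES_MASK is set in its supported_features_mask.
 */
static inline bool
gve_example_buffer_sizes_enabled(const struct gve_device_option_buffer_sizes *opt)
{
	return be32_to_cpu(opt->supported_features_mask) &
	       GVE_SUP_BUFFER_SIZES_MASK;
}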

#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0

#define GVE_VERSION_STR_LEN 128

enum gve_driver_capability {
	gve_driver_capability_gqi_qpl = 0,
	gve_driver_capability_gqi_rda = 1,
	gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
	gve_driver_capability_dqo_rda = 3,
	gve_driver_capability_alt_miss_compl = 4,
	gve_driver_capability_flexible_buffer_size = 5,
};

/* Each GVE_CAPn(a) selects the bit for capability 'a' within the n'th 64-bit
 * capability word (capabilities 0-63 in word 1, 64-127 in word 2, and so on).
 */
#define GVE_CAP1(a) BIT((int)(a))
#define GVE_CAP2(a) BIT(((int)(a)) - 64)
#define GVE_CAP3(a) BIT(((int)(a)) - 128)
#define GVE_CAP4(a) BIT(((int)(a)) - 192)

#define GVE_DRIVER_CAPABILITY_FLAGS1 \
	(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
	 GVE_CAP1(gve_driver_capability_gqi_rda) | \
	 GVE_CAP1(gve_driver_capability_dqo_rda) | \
	 GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
	 GVE_CAP1(gve_driver_capability_flexible_buffer_size))

#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0

struct gve_adminq_extended_command {
	__be32 inner_opcode;
	__be32 inner_length;
	__be64 inner_command_addr;
};

static_assert(sizeof(struct gve_adminq_extended_command) == 16);
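
/* Illustrative sketch only (helper name and parameters are assumptions, not
 * driver API): wrapping an inner command that was already written to a
 * driver-allocated DMA buffer of inner_len bytes at bus address inner_bus.
 */
static inline void
gve_example_prep_ext_cmd(struct gve_adminq_extended_command *ext,
			 dma_addr_t inner_bus, u32 inner_len)
{
	ext->inner_opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_FLOW_RULE);
	ext->inner_length = cpu_to_be32(inner_len);
	ext->inner_command_addr = cpu_to_be64(inner_bus);
}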

struct gve_driver_info {
	u8 os_type;	/* 0x01 = Linux */
	u8 driver_major;
	u8 driver_minor;
	u8 driver_sub;
	__be32 os_version_major;
	__be32 os_version_minor;
	__be32 os_version_sub;
	__be64 driver_capability_flags[4];
	u8 os_version_str1[GVE_VERSION_STR_LEN];
	u8 os_version_str2[GVE_VERSION_STR_LEN];
};
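
/* A minimal sketch (assumed helper, not driver API) of how the four
 * capability words above would be advertised through struct gve_driver_info;
 * the device expects each word big-endian.
 */
static inline void gve_example_fill_capabilities(struct gve_driver_info *info)
{
	info->driver_capability_flags[0] =
		cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1);
	info->driver_capability_flags[1] =
		cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2);
	info->driver_capability_flags[2] =
		cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3);
	info->driver_capability_flags[3] =
		cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4);
}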

struct gve_adminq_verify_driver_compatibility {
	__be64 driver_info_len;
	__be64 driver_info_addr;
};

static_assert(sizeof(struct gve_adminq_verify_driver_compatibility) == 16);

struct gve_adminq_configure_device_resources {
	__be64 counter_array;
	__be64 irq_db_addr;
	__be32 num_counters;
	__be32 num_irq_dbs;
	__be32 irq_db_stride;
	__be32 ntfy_blk_msix_base_idx;
	u8 queue_format;
	u8 padding[7];
};

static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);

struct gve_adminq_register_page_list {
	__be32 page_list_id;
	__be32 num_pages;
	__be64 page_address_list_addr;
	__be64 page_size;
};

static_assert(sizeof(struct gve_adminq_register_page_list) == 24);
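
/* page_address_list_addr points at an array of num_pages big-endian page
 * bus addresses. A sketch (helper name illustrative) of sizing that array
 * for DMA allocation:
 */
static inline size_t gve_example_page_list_bytes(u32 num_pages)
{
	return (size_t)num_pages * sizeof(__be64);
}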

struct gve_adminq_unregister_page_list {
	__be32 page_list_id;
};

static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);

#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF

struct gve_adminq_create_tx_queue {
	__be32 queue_id;
	__be32 reserved;
	__be64 queue_resources_addr;
	__be64 tx_ring_addr;
	__be32 queue_page_list_id;
	__be32 ntfy_id;
	__be64 tx_comp_ring_addr;
	__be16 tx_ring_size;
	__be16 tx_comp_ring_size;
	u8 padding[4];
};

static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
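
/* Queues in raw DMA addressing (RDA) mode carry no queue page list; the
 * GVE_RAW_ADDRESSING_QPL_ID sentinel defined above signals that to the
 * device. A one-line sketch (helper name illustrative):
 */
static inline void
gve_example_mark_tx_raw_addressing(struct gve_adminq_create_tx_queue *cmd)
{
	cmd->queue_page_list_id = cpu_to_be32(GVE_RAW_ADDRESSING_QPL_ID);
}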

struct gve_adminq_create_rx_queue {
	__be32 queue_id;
	__be32 index;
	__be32 reserved;
	__be32 ntfy_id;
	__be64 queue_resources_addr;
	__be64 rx_desc_ring_addr;
	__be64 rx_data_ring_addr;
	__be32 queue_page_list_id;
	__be16 rx_ring_size;
	__be16 packet_buffer_size;
	__be16 rx_buff_ring_size;
	u8 enable_rsc;
	u8 padding1;
	__be16 header_buffer_size;
	u8 padding2[2];
};

static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);

/* Queue resources that are shared with the device */
struct gve_queue_resources {
	union {
		struct {
			__be32 db_index;	/* Device -> Guest */
			__be32 counter_index;	/* Device -> Guest */
		};
		u8 reserved[64];
	};
};

static_assert(sizeof(struct gve_queue_resources) == 64);

struct gve_adminq_destroy_tx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_tx_queue) == 4);

struct gve_adminq_destroy_rx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_rx_queue) == 4);

/* GVE Set Driver Parameter Types */
enum gve_set_driver_param_types {
	GVE_SET_PARAM_MTU	= 0x1,
};

struct gve_adminq_set_driver_parameter {
	__be32 parameter_type;
	u8 reserved[4];
	__be64 parameter_value;
};

static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);

struct gve_adminq_report_stats {
	__be64 stats_report_len;
	__be64 stats_report_addr;
	__be64 interval;
};

static_assert(sizeof(struct gve_adminq_report_stats) == 24);

struct gve_adminq_report_link_speed {
	__be64 link_speed_address;
};

static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);

struct stats {
	__be32 stat_name;
	__be32 queue_id;
	__be64 value;
};

static_assert(sizeof(struct stats) == 16);

struct gve_stats_report {
	__be64 written_count;
	struct stats stats[];
};

static_assert(sizeof(struct gve_stats_report) == 8);
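
/* struct gve_stats_report ends in a flexible array, so the buffer shared
 * with the device must be sized for the expected entry count. A sketch,
 * with n_stats assumed (upstream code might prefer struct_size()):
 */
static inline size_t gve_example_stats_report_bytes(size_t n_stats)
{
	return sizeof(struct gve_stats_report) + n_stats * sizeof(struct stats);
}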

enum gve_stat_names {
	/* stats from gve */
	TX_WAKE_CNT			= 1,
	TX_STOP_CNT			= 2,
	TX_FRAMES_SENT			= 3,
	TX_BYTES_SENT			= 4,
	TX_LAST_COMPLETION_PROCESSED	= 5,
	RX_NEXT_EXPECTED_SEQUENCE	= 6,
	RX_BUFFERS_POSTED		= 7,
	TX_TIMEOUT_CNT			= 8,
	/* stats from NIC */
	RX_QUEUE_DROP_CNT		= 65,
	RX_NO_BUFFERS_POSTED		= 66,
	RX_DROPS_PACKET_OVER_MRU	= 67,
	RX_DROPS_INVALID_CHECKSUM	= 68,
};

enum gve_l3_type {
	/* Must be zero so a zero-initialized LUT maps to unknown. */
	GVE_L3_TYPE_UNKNOWN = 0,
	GVE_L3_TYPE_OTHER,
	GVE_L3_TYPE_IPV4,
	GVE_L3_TYPE_IPV6,
};

enum gve_l4_type {
	/* Must be zero so a zero-initialized LUT maps to unknown. */
	GVE_L4_TYPE_UNKNOWN = 0,
	GVE_L4_TYPE_OTHER,
	GVE_L4_TYPE_TCP,
	GVE_L4_TYPE_UDP,
	GVE_L4_TYPE_ICMP,
	GVE_L4_TYPE_SCTP,
};

/* These are control path types for PTYPE which are the same as the data path
 * types.
 */
struct gve_ptype_entry {
	u8 l3_type;
	u8 l4_type;
};

struct gve_ptype_map {
	struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
};
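
/* Data-path packet types index this LUT directly. A lookup sketch
 * (helper name illustrative), masking to the 10-bit ptype space:
 */
static inline struct gve_ptype_entry
gve_example_ptype_lookup(const struct gve_ptype_map *map, u16 ptype)
{
	return map->ptypes[ptype & ((1 << 10) - 1)];
}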

struct gve_adminq_get_ptype_map {
	__be64 ptype_map_len;
	__be64 ptype_map_addr;
};

static_assert(sizeof(struct gve_adminq_get_ptype_map) == 16);

/* Flow-steering related definitions */
enum gve_adminq_flow_rule_cfg_opcode {
	GVE_FLOW_RULE_CFG_ADD	= 0,
	GVE_FLOW_RULE_CFG_DEL	= 1,
	GVE_FLOW_RULE_CFG_RESET	= 2,
};

enum gve_adminq_flow_rule_query_opcode {
	GVE_FLOW_RULE_QUERY_RULES	= 0,
	GVE_FLOW_RULE_QUERY_IDS		= 1,
	GVE_FLOW_RULE_QUERY_STATS	= 2,
};

enum gve_adminq_flow_type {
	GVE_FLOW_TYPE_TCPV4,
	GVE_FLOW_TYPE_UDPV4,
	GVE_FLOW_TYPE_SCTPV4,
	GVE_FLOW_TYPE_AHV4,
	GVE_FLOW_TYPE_ESPV4,
	GVE_FLOW_TYPE_TCPV6,
	GVE_FLOW_TYPE_UDPV6,
	GVE_FLOW_TYPE_SCTPV6,
	GVE_FLOW_TYPE_AHV6,
	GVE_FLOW_TYPE_ESPV6,
};

/* Flow-steering command */
struct gve_adminq_flow_rule {
	__be16 flow_type;
	__be16 action; /* RX queue id */
	struct gve_flow_spec key;
	struct gve_flow_spec mask;
};

struct gve_adminq_configure_flow_rule {
	__be16 opcode;
	u8 padding[2];
	struct gve_adminq_flow_rule rule;
	__be32 location;
};

static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 92);
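
/* A sketch (helper name and arguments are illustrative) of filling an ADD
 * command that steers matching TCP/IPv4 traffic to an RX queue. The key and
 * mask (struct gve_flow_spec, defined in "gve.h") are left zeroed here for
 * brevity; a real rule would populate them.
 */
static inline void
gve_example_fill_add_rule(struct gve_adminq_configure_flow_rule *cfg,
			  u16 rx_queue, u32 loc)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_ADD);
	cfg->rule.flow_type = cpu_to_be16(GVE_FLOW_TYPE_TCPV4);
	cfg->rule.action = cpu_to_be16(rx_queue);
	cfg->location = cpu_to_be32(loc);
}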

struct gve_query_flow_rules_descriptor {
	__be32 num_flow_rules;
	__be32 max_flow_rules;
	__be32 num_queried_rules;
	__be32 total_length;
};

struct gve_adminq_queried_flow_rule {
	__be32 location;
	struct gve_adminq_flow_rule flow_rule;
};

struct gve_adminq_query_flow_rules {
	__be16 opcode;
	u8 padding[2];
	__be32 starting_rule_id;
	__be64 available_length; /* The DMA memory length that the driver allocated */
	__be64 rule_descriptor_addr; /* The DMA memory address */
};

static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);

union gve_adminq_command {
	struct {
		__be32 opcode;
		__be32 status;
		union {
			struct gve_adminq_configure_device_resources
						configure_device_resources;
			struct gve_adminq_create_tx_queue create_tx_queue;
			struct gve_adminq_create_rx_queue create_rx_queue;
			struct gve_adminq_destroy_tx_queue destroy_tx_queue;
			struct gve_adminq_destroy_rx_queue destroy_rx_queue;
			struct gve_adminq_describe_device describe_device;
			struct gve_adminq_register_page_list reg_page_list;
			struct gve_adminq_unregister_page_list unreg_page_list;
			struct gve_adminq_set_driver_parameter set_driver_param;
			struct gve_adminq_report_stats report_stats;
			struct gve_adminq_report_link_speed report_link_speed;
			struct gve_adminq_get_ptype_map get_ptype_map;
			struct gve_adminq_verify_driver_compatibility
						verify_driver_compatibility;
			struct gve_adminq_query_flow_rules query_flow_rules;
			struct gve_adminq_extended_command extended_command;
		};
	};
	u8 reserved[64];
};

static_assert(sizeof(union gve_adminq_command) == 64);
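
/* An end-to-end sketch (illustrative helper, not the driver's own code
 * path): preparing a DESCRIBE_DEVICE command in a 64-byte slot. desc_bus is
 * the bus address of a driver-allocated descriptor buffer of avail_len bytes.
 */
static inline void
gve_example_prep_describe_device(union gve_adminq_command *cmd,
				 dma_addr_t desc_bus, u32 avail_len)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd->describe_device.device_descriptor_addr = cpu_to_be64(desc_bus);
	cmd->describe_device.device_descriptor_version =
		cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd->describe_device.available_length = cpu_to_be32(avail_len);
}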

/* Types referenced below; struct gve_priv and struct gve_queue_page_list
 * are defined in "gve.h".
 */
struct device;
struct gve_priv;
struct gve_queue_page_list;

int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);
int gve_adminq_describe_device(struct gve_priv *priv);
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);
int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc);
int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);

struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut);

#endif /* _GVE_ADMINQ_H */