xref: /linux/drivers/net/ethernet/google/gve/gve_adminq.h (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca)
1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #ifndef _GVE_ADMINQ_H
8 #define _GVE_ADMINQ_H
9 
10 #include <linux/build_bug.h>
11 
/* Admin queue opcodes: values written to the 'opcode' field of a
 * union gve_adminq_command. Part of the device ABI — do not renumber.
 */
enum gve_adminq_opcodes {
	GVE_ADMINQ_DESCRIBE_DEVICE		= 0x1,
	GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES	= 0x2,
	GVE_ADMINQ_REGISTER_PAGE_LIST		= 0x3,
	GVE_ADMINQ_UNREGISTER_PAGE_LIST		= 0x4,
	GVE_ADMINQ_CREATE_TX_QUEUE		= 0x5,
	GVE_ADMINQ_CREATE_RX_QUEUE		= 0x6,
	GVE_ADMINQ_DESTROY_TX_QUEUE		= 0x7,
	GVE_ADMINQ_DESTROY_RX_QUEUE		= 0x8,
	GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES	= 0x9,
	/* NOTE(review): 0xA is skipped — presumably reserved by the device
	 * ABI; confirm against the gve device spec before reusing it.
	 */
	GVE_ADMINQ_SET_DRIVER_PARAMETER		= 0xB,
	GVE_ADMINQ_REPORT_STATS			= 0xC,
	GVE_ADMINQ_REPORT_LINK_SPEED		= 0xD,
	GVE_ADMINQ_GET_PTYPE_MAP		= 0xE,
	GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY	= 0xF,
};
29 
/* Admin queue status codes: values the device writes back into the
 * 'status' field of a command. 0x0 means the device has not completed
 * the command yet; error codes occupy the top of the 32-bit range.
 */
enum gve_adminq_statuses {
	GVE_ADMINQ_COMMAND_UNSET			= 0x0,
	GVE_ADMINQ_COMMAND_PASSED			= 0x1,
	GVE_ADMINQ_COMMAND_ERROR_ABORTED		= 0xFFFFFFF0,
	GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS		= 0xFFFFFFF1,
	GVE_ADMINQ_COMMAND_ERROR_CANCELLED		= 0xFFFFFFF2,
	GVE_ADMINQ_COMMAND_ERROR_DATALOSS		= 0xFFFFFFF3,
	GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED	= 0xFFFFFFF4,
	GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION	= 0xFFFFFFF5,
	GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR		= 0xFFFFFFF6,
	GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT	= 0xFFFFFFF7,
	GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND		= 0xFFFFFFF8,
	GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE		= 0xFFFFFFF9,
	GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED	= 0xFFFFFFFA,
	GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED	= 0xFFFFFFFB,
	GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED	= 0xFFFFFFFC,
	GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE		= 0xFFFFFFFD,
	GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED		= 0xFFFFFFFE,
	GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR		= 0xFFFFFFFF,
};
51 
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1

/* All AdminQ command structs should be naturally packed. The static_assert
 * calls make sure this is the case at compile time.
 */

/* GVE_ADMINQ_DESCRIBE_DEVICE command: asks the device to fill in a
 * struct gve_device_descriptor. All multi-byte fields are big-endian
 * on the wire (__be types).
 */
struct gve_adminq_describe_device {
	/* presumably the DMA address of a descriptor-sized buffer the
	 * device writes into — confirm against gve_adminq.c
	 */
	__be64 device_descriptor_addr;
	__be32 device_descriptor_version;	/* GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION */
	__be32 available_length;		/* size of the buffer in bytes */
};

static_assert(sizeof(struct gve_adminq_describe_device) == 16);
65 
/* Device-written response to GVE_ADMINQ_DESCRIBE_DEVICE. A variable number
 * of struct gve_device_option entries (num_device_options of them) follows
 * this header; total_length covers header plus options.
 */
struct gve_device_descriptor {
	__be64 max_registered_pages;
	__be16 reserved1;
	__be16 tx_queue_entries;	/* default TX ring size */
	__be16 rx_queue_entries;	/* default RX ring size */
	__be16 default_num_queues;
	__be16 mtu;
	__be16 counters;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
	u8  mac[ETH_ALEN];		/* device MAC address */
	__be16 num_device_options;	/* count of trailing gve_device_option */
	__be16 total_length;		/* bytes: descriptor + all options */
	u8  reserved2[6];		/* pad to 40 bytes / 8-byte alignment */
};

static_assert(sizeof(struct gve_device_descriptor) == 40);
83 
/* Header of one device option in the describe-device response. option_id
 * is a gve_dev_opt_id value; option_length is the byte length of the
 * option-specific payload that follows this header.
 */
struct gve_device_option {
	__be16 option_id;
	__be16 option_length;
	__be32 required_features_mask;	/* features driver must support to use option */
};

static_assert(sizeof(struct gve_device_option) == 8);
91 
/* Payload for GVE_DEV_OPT_ID_GQI_RDA (GQI queue format, raw DMA addressing). */
struct gve_device_option_gqi_rda {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);
97 
/* Payload for GVE_DEV_OPT_ID_GQI_QPL (GQI queue format, queue page lists). */
struct gve_device_option_gqi_qpl {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
103 
/* Payload for GVE_DEV_OPT_ID_DQO_RDA (DQO queue format, raw DMA addressing). */
struct gve_device_option_dqo_rda {
	__be32 supported_features_mask;
	__be32 reserved;
};

static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
110 
/* Payload for GVE_DEV_OPT_ID_DQO_QPL (DQO queue format, queue page lists). */
struct gve_device_option_dqo_qpl {
	__be32 supported_features_mask;
	__be16 tx_pages_per_qpl;	/* pages per TX queue page list */
	__be16 rx_pages_per_qpl;	/* pages per RX queue page list */
};

static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8);
118 
/* Payload for GVE_DEV_OPT_ID_JUMBO_FRAMES. */
struct gve_device_option_jumbo_frames {
	__be32 supported_features_mask;
	__be16 max_mtu;		/* largest MTU the device supports */
	u8 padding[2];		/* pad to 8 bytes */
};

static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
126 
/* Payload for GVE_DEV_OPT_ID_BUFFER_SIZES. */
struct gve_device_option_buffer_sizes {
	/* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
	__be32 supported_features_mask;
	__be16 packet_buffer_size;
	__be16 header_buffer_size;	/* for header-split RX */
};

static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
135 
/* Payload for GVE_DEV_OPT_ID_MODIFY_RING: device-advertised ring size
 * bounds.
 */
struct gve_device_option_modify_ring {
	/* NOTE(review): field name is misspelled ("featured" rather than
	 * "features") but is kept as-is; renaming would break every user
	 * of this header.
	 */
	__be32 supported_featured_mask;
	__be16 max_rx_ring_size;
	__be16 max_tx_ring_size;
	__be16 min_rx_ring_size;
	__be16 min_tx_ring_size;
};

static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
145 
/* Terminology:
 *
 * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
 *       mapped and read/updated by the device.
 *
 * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
 *       the device for read/write and data is copied from/to SKBs.
 */
/* Option IDs for struct gve_device_option.option_id. IDs are part of the
 * device ABI; note 0x5 and 0x9 are not used here — presumably reserved,
 * verify against the device spec before assigning them.
 */
enum gve_dev_opt_id {
	GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING	= 0x1,
	GVE_DEV_OPT_ID_GQI_RDA			= 0x2,
	GVE_DEV_OPT_ID_GQI_QPL			= 0x3,
	GVE_DEV_OPT_ID_DQO_RDA			= 0x4,
	GVE_DEV_OPT_ID_MODIFY_RING		= 0x6,
	GVE_DEV_OPT_ID_DQO_QPL			= 0x7,
	GVE_DEV_OPT_ID_JUMBO_FRAMES		= 0x8,
	GVE_DEV_OPT_ID_BUFFER_SIZES		= 0xa,
};
164 
/* Expected required_features_mask per option — currently all zero, i.e.
 * the driver accepts each option with no prerequisite features.
 */
enum gve_dev_opt_req_feat_mask {
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING	= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES		= 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING		= 0x0,
};
175 
/* Bits expected in an option's supported_features_mask. */
enum gve_sup_feature_mask {
	GVE_SUP_MODIFY_RING_MASK	= 1 << 0,
	GVE_SUP_JUMBO_FRAMES_MASK	= 1 << 2,
	GVE_SUP_BUFFER_SIZES_MASK	= 1 << 4,
};
181 
/* The GQI raw-addressing option carries no payload. */
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0

/* Max length (bytes) of each os_version_str* in gve_driver_info. */
#define GVE_VERSION_STR_LEN 128
185 
/* Driver capability bit numbers advertised via driver_capability_flags.
 * NOTE(review): enum tag is misspelled ("capbility") but kept — renaming
 * would break users of this header.
 */
enum gve_driver_capbility {
	gve_driver_capability_gqi_qpl = 0,
	gve_driver_capability_gqi_rda = 1,
	gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
	gve_driver_capability_dqo_rda = 3,
	gve_driver_capability_alt_miss_compl = 4,
	gve_driver_capability_flexible_buffer_size = 5,
};
194 
/* Map a driver-capability bit number onto the corresponding bit of one of
 * the four 64-bit capability words: GVE_CAPn covers bits [64*(n-1), 64*n).
 * The argument is fully parenthesized so expression arguments expand
 * safely (standard macro hygiene).
 */
#define GVE_CAP1(a) BIT((int)(a))
#define GVE_CAP2(a) BIT(((int)(a)) - 64)
#define GVE_CAP3(a) BIT(((int)(a)) - 128)
#define GVE_CAP4(a) BIT(((int)(a)) - 192)

/* Capabilities this driver reports to the device in gve_driver_info. */
#define GVE_DRIVER_CAPABILITY_FLAGS1 \
	(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
	 GVE_CAP1(gve_driver_capability_gqi_rda) | \
	 GVE_CAP1(gve_driver_capability_dqo_rda) | \
	 GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
	 GVE_CAP1(gve_driver_capability_flexible_buffer_size))

/* No capabilities defined yet in words 2-4. */
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
210 
/* Driver/OS identity blob referenced by the verify-driver-compatibility
 * command (see gve_adminq_verify_driver_compatibility below).
 */
struct gve_driver_info {
	u8 os_type;	/* 0x01 = Linux */
	u8 driver_major;
	u8 driver_minor;
	u8 driver_sub;
	__be32 os_version_major;
	__be32 os_version_minor;
	__be32 os_version_sub;
	/* GVE_DRIVER_CAPABILITY_FLAGS1..4; bit numbers from enum gve_driver_capbility */
	__be64 driver_capability_flags[4];
	u8 os_version_str1[GVE_VERSION_STR_LEN];
	u8 os_version_str2[GVE_VERSION_STR_LEN];
};
223 
/* GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY command: points the device at a
 * struct gve_driver_info of driver_info_len bytes.
 */
struct gve_adminq_verify_driver_compatibility {
	__be64 driver_info_len;
	__be64 driver_info_addr;	/* presumably a DMA address — see gve_adminq.c */
};

static_assert(sizeof(struct gve_adminq_verify_driver_compatibility) == 16);
230 
/* GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES command: hands the device the
 * counter array and IRQ doorbell blocks.
 */
struct gve_adminq_configure_device_resources {
	__be64 counter_array;		/* bus address of the counter array */
	__be64 irq_db_addr;		/* bus address of the IRQ doorbell array */
	__be32 num_counters;
	__be32 num_irq_dbs;
	__be32 irq_db_stride;		/* byte stride between doorbells */
	__be32 ntfy_blk_msix_base_idx;	/* first MSI-X vector for notify blocks */
	u8 queue_format;
	u8 padding[7];			/* pad to 40 bytes */
};

static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);
243 
/* GVE_ADMINQ_REGISTER_PAGE_LIST command: registers a queue page list of
 * num_pages pages whose addresses are in an array at page_address_list_addr.
 */
struct gve_adminq_register_page_list {
	__be32 page_list_id;
	__be32 num_pages;
	__be64 page_address_list_addr;
	__be64 page_size;
};

static_assert(sizeof(struct gve_adminq_register_page_list) == 24);
252 
/* GVE_ADMINQ_UNREGISTER_PAGE_LIST command. */
struct gve_adminq_unregister_page_list {
	__be32 page_list_id;
};

static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);

/* Sentinel queue_page_list_id meaning "raw DMA addressing, no QPL". */
#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF
260 
/* GVE_ADMINQ_CREATE_TX_QUEUE command. */
struct gve_adminq_create_tx_queue {
	__be32 queue_id;
	__be32 reserved;
	__be64 queue_resources_addr;	/* points at a struct gve_queue_resources */
	__be64 tx_ring_addr;
	/* GVE_RAW_ADDRESSING_QPL_ID when not using a queue page list */
	__be32 queue_page_list_id;
	__be32 ntfy_id;
	__be64 tx_comp_ring_addr;	/* DQO completion ring */
	__be16 tx_ring_size;
	__be16 tx_comp_ring_size;
	u8 padding[4];			/* pad to 48 bytes */
};

static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
275 
/* GVE_ADMINQ_CREATE_RX_QUEUE command. */
struct gve_adminq_create_rx_queue {
	__be32 queue_id;
	__be32 index;
	__be32 reserved;
	__be32 ntfy_id;
	__be64 queue_resources_addr;	/* points at a struct gve_queue_resources */
	__be64 rx_desc_ring_addr;
	__be64 rx_data_ring_addr;
	/* GVE_RAW_ADDRESSING_QPL_ID when not using a queue page list */
	__be32 queue_page_list_id;
	__be16 rx_ring_size;
	__be16 packet_buffer_size;
	__be16 rx_buff_ring_size;	/* DQO buffer ring */
	u8 enable_rsc;			/* boolean: receive segment coalescing */
	u8 padding1;
	__be16 header_buffer_size;	/* for header-split RX */
	u8 padding2[2];			/* pad to 56 bytes */
};

static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
295 
/* Queue resources that are shared with the device. The union pads the
 * structure to a fixed 64 bytes regardless of how many fields are defined.
 */
struct gve_queue_resources {
	union {
		struct {
			__be32 db_index;	/* Device -> Guest */
			__be32 counter_index;	/* Device -> Guest */
		};
		u8 reserved[64];
	};
};

static_assert(sizeof(struct gve_queue_resources) == 64);
308 
/* GVE_ADMINQ_DESTROY_TX_QUEUE command. */
struct gve_adminq_destroy_tx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_tx_queue) == 4);
314 
/* GVE_ADMINQ_DESTROY_RX_QUEUE command. */
struct gve_adminq_destroy_rx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_rx_queue) == 4);
320 
/* GVE Set Driver Parameter Types — values for
 * gve_adminq_set_driver_parameter.parameter_type.
 */
enum gve_set_driver_param_types {
	GVE_SET_PARAM_MTU	= 0x1,
};
325 
/* GVE_ADMINQ_SET_DRIVER_PARAMETER command: generic (type, value) pair. */
struct gve_adminq_set_driver_parameter {
	__be32 parameter_type;		/* enum gve_set_driver_param_types */
	u8 reserved[4];
	__be64 parameter_value;
};

static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);
333 
/* GVE_ADMINQ_REPORT_STATS command: points the device at a
 * struct gve_stats_report buffer; interval presumably controls the
 * reporting cadence — confirm units in gve_adminq.c.
 */
struct gve_adminq_report_stats {
	__be64 stats_report_len;
	__be64 stats_report_addr;
	__be64 interval;
};

static_assert(sizeof(struct gve_adminq_report_stats) == 24);
341 
/* GVE_ADMINQ_REPORT_LINK_SPEED command: device writes the link speed to
 * the given address.
 */
struct gve_adminq_report_link_speed {
	__be64 link_speed_address;
};

static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
347 
/* One statistic entry in a stats report. */
struct stats {
	__be32 stat_name;	/* enum gve_stat_names */
	__be32 queue_id;
	__be64 value;
};

static_assert(sizeof(struct stats) == 16);
355 
/* Stats buffer layout shared with the device: a count followed by a
 * flexible array of entries (static_assert covers the header only).
 */
struct gve_stats_report {
	__be64 written_count;	/* number of valid entries in stats[] */
	struct stats stats[];
};

static_assert(sizeof(struct gve_stats_report) == 8);
362 
/* Identifiers for struct stats.stat_name. Driver-side stats start at 1;
 * NIC-side stats start at 65.
 */
enum gve_stat_names {
	// stats from gve
	TX_WAKE_CNT			= 1,
	TX_STOP_CNT			= 2,
	TX_FRAMES_SENT			= 3,
	TX_BYTES_SENT			= 4,
	TX_LAST_COMPLETION_PROCESSED	= 5,
	RX_NEXT_EXPECTED_SEQUENCE	= 6,
	RX_BUFFERS_POSTED		= 7,
	TX_TIMEOUT_CNT			= 8,
	// stats from NIC
	RX_QUEUE_DROP_CNT		= 65,
	RX_NO_BUFFERS_POSTED		= 66,
	RX_DROPS_PACKET_OVER_MRU	= 67,
	RX_DROPS_INVALID_CHECKSUM	= 68,
};
379 
/* L3 protocol classification used in the packet-type (PTYPE) map. */
enum gve_l3_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L3_TYPE_UNKNOWN = 0,
	GVE_L3_TYPE_OTHER,
	GVE_L3_TYPE_IPV4,
	GVE_L3_TYPE_IPV6,
};
387 
/* L4 protocol classification used in the packet-type (PTYPE) map. */
enum gve_l4_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L4_TYPE_UNKNOWN = 0,
	GVE_L4_TYPE_OTHER,
	GVE_L4_TYPE_TCP,
	GVE_L4_TYPE_UDP,
	GVE_L4_TYPE_ICMP,
	GVE_L4_TYPE_SCTP,
};
397 
/* These are control path types for PTYPE which are the same as the data path
 * types.
 */
struct gve_ptype_entry {
	u8 l3_type;	/* enum gve_l3_type */
	u8 l4_type;	/* enum gve_l4_type */
};
405 
/* Full PTYPE lookup table the device writes for GVE_ADMINQ_GET_PTYPE_MAP. */
struct gve_ptype_map {
	struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
};
409 
410 struct gve_adminq_get_ptype_map {
411 	__be64 ptype_map_len;
412 	__be64 ptype_map_addr;
413 };
414 
/* One 64-byte admin queue command slot: common opcode/status header
 * followed by the opcode-specific payload; reserved[] pads every command
 * to exactly 64 bytes.
 */
union gve_adminq_command {
	struct {
		__be32 opcode;	/* enum gve_adminq_opcodes */
		__be32 status;	/* enum gve_adminq_statuses, written by device */
		union {
			struct gve_adminq_configure_device_resources
						configure_device_resources;
			struct gve_adminq_create_tx_queue create_tx_queue;
			struct gve_adminq_create_rx_queue create_rx_queue;
			struct gve_adminq_destroy_tx_queue destroy_tx_queue;
			struct gve_adminq_destroy_rx_queue destroy_rx_queue;
			struct gve_adminq_describe_device describe_device;
			struct gve_adminq_register_page_list reg_page_list;
			struct gve_adminq_unregister_page_list unreg_page_list;
			struct gve_adminq_set_driver_parameter set_driver_param;
			struct gve_adminq_report_stats report_stats;
			struct gve_adminq_report_link_speed report_link_speed;
			struct gve_adminq_get_ptype_map get_ptype_map;
			struct gve_adminq_verify_driver_compatibility
						verify_driver_compatibility;
		};
	};
	u8 reserved[64];
};

static_assert(sizeof(union gve_adminq_command) == 64);
441 
/* Admin queue lifecycle. */
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);
/* Issuers for the commands defined above. All return 0 on success or a
 * negative errno on failure (kernel convention for int-returning APIs).
 */
int gve_adminq_describe_device(struct gve_priv *priv);
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);

struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut);
472 
473 #endif /* _GVE_ADMINQ_H */
474