/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/endian.h>
#include <sys/socket.h>
#include <sys/time.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>

#include "gve.h"
#include "gve_adminq.h"

#define GVE_ADMINQ_SLEEP_LEN_MS 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 10
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1
#define GVE_REG_ADMINQ_ADDR 16
#define ADMINQ_SLOTS (ADMINQ_SIZE / sizeof(struct gve_adminq_command))

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
    "Expected: length=%d, feature_mask=%x.\n" \
    "Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected." \
    " Possible older version of guest driver.\n"

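/*
 * Validate a single device option reported by the device and, if its length
 * and required feature mask match what the driver expects, record a pointer
 * to its payload in the corresponding output argument.
 */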
static void
gve_parse_device_option(struct gve_priv *priv,
    struct gve_device_descriptor *device_descriptor,
    struct gve_device_option *option,
    struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
    struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	uint32_t req_feat_mask = be32toh(option->required_features_mask);
	uint16_t option_length = be16toh(option->option_length);
	uint16_t option_id = be16toh(option->option_id);

	/*
	 * If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
			    GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
			    "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;

	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "Jumbo Frames", (int)sizeof(**dev_op_jumbo_frames),
			    GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			device_printf(priv->dev,
			    GVE_DEVICE_OPTION_TOO_BIG_FMT, "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;

	default:
		/*
		 * If we don't recognize the option, just continue
		 * without doing anything.
		 */
		device_printf(priv->dev, "Unrecognized device option 0x%hx not enabled.\n",
		    option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
    struct gve_device_descriptor *descriptor,
    struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
    struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	char *desc_end = (char *)descriptor + be16toh(descriptor->total_length);
	const int num_options = be16toh(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		if ((char *)(dev_opt + 1) > desc_end ||
		    (char *)(dev_opt + 1) + be16toh(dev_opt->option_length) > desc_end) {
			device_printf(priv->dev,
			    "options exceed device_descriptor's total length.\n");
			return (EINVAL);
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
		    dev_op_gqi_qpl, dev_op_jumbo_frames);
		dev_opt = (void *)((char *)(dev_opt + 1) + be16toh(dev_opt->option_length));
	}

	return (0);
}

static int gve_adminq_execute_cmd(struct gve_priv *priv,
    struct gve_adminq_command *cmd);

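/*
 * Helpers that issue a single DestroyTxQueue or DestroyRxQueue command for
 * the given queue id and wait for the device to complete it.
 */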
static int
gve_adminq_destroy_tx_queue(struct gve_priv *priv, uint32_t id)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue.queue_id = htobe32(id);

	return (gve_adminq_execute_cmd(priv, &cmd));
}

static int
gve_adminq_destroy_rx_queue(struct gve_priv *priv, uint32_t id)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue.queue_id = htobe32(id);

	return (gve_adminq_execute_cmd(priv, &cmd));
}

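/*
 * Destroy the first num_queues rx queues. Failures are logged per queue and
 * the most recent error, if any, is returned.
 */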
int
gve_adminq_destroy_rx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err = 0;
	int qerr;
	int i;

	for (i = 0; i < num_queues; i++) {
		qerr = gve_adminq_destroy_rx_queue(priv, i);
		if (qerr != 0) {
			device_printf(priv->dev, "Failed to destroy rxq %d, err: %d\n",
			    i, qerr);
			err = qerr;
		}
	}

	if (err != 0)
		return (err);

	device_printf(priv->dev, "Destroyed %d rx queues\n", num_queues);
	return (0);
}

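/*
 * Destroy the first num_queues tx queues. Failures are logged per queue and
 * the most recent error, if any, is returned.
 */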
int
gve_adminq_destroy_tx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err = 0;
	int qerr;
	int i;

	for (i = 0; i < num_queues; i++) {
		qerr = gve_adminq_destroy_tx_queue(priv, i);
		if (qerr != 0) {
			device_printf(priv->dev, "Failed to destroy txq %d, err: %d\n",
			    i, qerr);
			err = qerr;
		}
	}

	if (err != 0)
		return (err);

	device_printf(priv->dev, "Destroyed %d tx queues\n", num_queues);
	return (0);
}

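/*
 * Build a CreateRxQueue command describing one rx ring: its notification
 * block, queue resources, descriptor and data rings, and the queue page
 * list backing its buffers.
 */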
static int
gve_adminq_create_rx_queue(struct gve_priv *priv, uint32_t queue_index)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	struct gve_dma_handle *qres_dma = &rx->com.q_resources_mem;

	bus_dmamap_sync(qres_dma->tag, qres_dma->map, BUS_DMASYNC_PREREAD);

	cmd.opcode = htobe32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = htobe32(queue_index),
		.index = htobe32(queue_index),
		.ntfy_id = htobe32(rx->com.ntfy_id),
		.queue_resources_addr = htobe64(qres_dma->bus_addr),
		.rx_desc_ring_addr = htobe64(rx->desc_ring_mem.bus_addr),
		.rx_data_ring_addr = htobe64(rx->data_ring_mem.bus_addr),
		.queue_page_list_id = htobe32((rx->com.qpl)->id),
		.rx_ring_size = htobe16(priv->rx_desc_cnt),
		.packet_buffer_size = htobe16(GVE_DEFAULT_RX_BUFFER_SIZE),
	};

	return (gve_adminq_execute_cmd(priv, &cmd));
}

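/*
 * Create the first num_queues rx queues, destroying any queues this call
 * already created if one of them fails.
 */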
int
gve_adminq_create_rx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to create rxq %d, err: %d\n",
			    i, err);
			goto abort;
		}
	}

	if (bootverbose)
		device_printf(priv->dev, "Created %d rx queues\n", num_queues);
	return (0);

abort:
	gve_adminq_destroy_rx_queues(priv, i);
	return (err);
}

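/*
 * Build a CreateTxQueue command describing one tx ring: its queue
 * resources, descriptor ring, queue page list, and notification block.
 */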
static int
gve_adminq_create_tx_queue(struct gve_priv *priv, uint32_t queue_index)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	struct gve_dma_handle *qres_dma = &tx->com.q_resources_mem;

	bus_dmamap_sync(qres_dma->tag, qres_dma->map, BUS_DMASYNC_PREREAD);

	cmd.opcode = htobe32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = htobe32(queue_index),
		.queue_resources_addr = htobe64(qres_dma->bus_addr),
		.tx_ring_addr = htobe64(tx->desc_ring_mem.bus_addr),
		.queue_page_list_id = htobe32((tx->com.qpl)->id),
		.ntfy_id = htobe32(tx->com.ntfy_id),
		.tx_ring_size = htobe16(priv->tx_desc_cnt),
	};

	return (gve_adminq_execute_cmd(priv, &cmd));
}

int
gve_adminq_create_tx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to create txq %d, err: %d\n",
			    i, err);
			goto abort;
		}
	}

	if (bootverbose)
		device_printf(priv->dev, "Created %d tx queues\n", num_queues);
	return (0);

abort:
	gve_adminq_destroy_tx_queues(priv, i);
	return (err);
}

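/* Ask the device to accept a new MTU via a SetDriverParameter command. */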
int
gve_adminq_set_mtu(struct gve_priv *priv, uint32_t mtu)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = htobe32(GVE_SET_PARAM_MTU),
		.parameter_value = htobe64(mtu),
	};

	return (gve_adminq_execute_cmd(priv, &cmd));
}

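/*
 * Act on the feature options parsed out of the device descriptor; today
 * this only means adopting the device's jumbo-frame MTU limit.
 */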
static void
gve_enable_supported_features(struct gve_priv *priv,
    uint32_t supported_features_mask,
    const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames)
{
	if (dev_op_jumbo_frames != NULL &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK) != 0) {
		if (bootverbose)
			device_printf(priv->dev, "JUMBO FRAMES device option enabled: %u.\n",
			    be16toh(dev_op_jumbo_frames->max_mtu));
		priv->max_mtu = be16toh(dev_op_jumbo_frames->max_mtu);
	}
}

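/*
 * Issue a DescribeDevice command into a dedicated DMA buffer and distill
 * the response into priv: queue format, ring sizes, counters, MTU limit,
 * and the device MAC address.
 */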
int
gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
	struct gve_device_descriptor *desc;
	struct gve_dma_handle desc_mem;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	uint32_t supported_features_mask = 0;
	int rc;
	int i;

	rc = gve_dma_alloc_coherent(priv, ADMINQ_SIZE, ADMINQ_SIZE, &desc_mem);
	if (rc != 0) {
		device_printf(priv->dev, "Failed to alloc DMA mem for DescribeDevice.\n");
		return (rc);
	}

	desc = desc_mem.cpu_addr;

	aq_cmd.opcode = htobe32(GVE_ADMINQ_DESCRIBE_DEVICE);
	aq_cmd.describe_device.device_descriptor_addr = htobe64(
	    desc_mem.bus_addr);
	aq_cmd.describe_device.device_descriptor_version = htobe32(
	    GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	aq_cmd.describe_device.available_length = htobe32(ADMINQ_SIZE);

	bus_dmamap_sync(desc_mem.tag, desc_mem.map, BUS_DMASYNC_PREWRITE);

	rc = gve_adminq_execute_cmd(priv, &aq_cmd);
	if (rc != 0)
		goto free_device_descriptor;

	bus_dmamap_sync(desc_mem.tag, desc_mem.map, BUS_DMASYNC_POSTREAD);

	rc = gve_process_device_options(priv, desc, &dev_op_gqi_qpl,
	    &dev_op_jumbo_frames);
	if (rc != 0)
		goto free_device_descriptor;

	if (dev_op_gqi_qpl != NULL) {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		supported_features_mask = be32toh(
		    dev_op_gqi_qpl->supported_features_mask);
		if (bootverbose)
			device_printf(priv->dev,
			    "Driver is running with GQI QPL queue format.\n");
	} else {
		device_printf(priv->dev, "No compatible queue formats\n");
		rc = EINVAL;
		goto free_device_descriptor;
	}

	priv->num_event_counters = be16toh(desc->counters);
	priv->default_num_queues = be16toh(desc->default_num_queues);
	priv->tx_desc_cnt = be16toh(desc->tx_queue_entries);
	priv->rx_desc_cnt = be16toh(desc->rx_queue_entries);
	priv->rx_pages_per_qpl = be16toh(desc->rx_pages_per_qpl);
	priv->max_registered_pages = be64toh(desc->max_registered_pages);
	priv->max_mtu = be16toh(desc->mtu);
	priv->supported_features = supported_features_mask;

	gve_enable_supported_features(priv, supported_features_mask,
	    dev_op_jumbo_frames);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		priv->mac[i] = desc->mac[i];

free_device_descriptor:
	gve_dma_free_coherent(&desc_mem);

	return (rc);
}

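/*
 * Hand the device the bus address of every page in a queue page list.
 * The page address array is staged in a temporary DMA buffer that is
 * released once the RegisterPageList command completes.
 */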
int
gve_adminq_register_page_list(struct gve_priv *priv,
    struct gve_queue_page_list *qpl)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};
	uint32_t num_entries = qpl->num_pages;
	uint32_t size = num_entries * sizeof(qpl->dmas[0].bus_addr);
	__be64 *page_list;
	struct gve_dma_handle dma;
	int err;
	int i;

	err = gve_dma_alloc_coherent(priv, size, PAGE_SIZE, &dma);
	if (err != 0)
		return (err);

	page_list = dma.cpu_addr;

	for (i = 0; i < num_entries; i++)
		page_list[i] = htobe64(qpl->dmas[i].bus_addr);

	bus_dmamap_sync(dma.tag, dma.map, BUS_DMASYNC_PREWRITE);

	cmd.opcode = htobe32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = htobe32(qpl->id),
		.num_pages = htobe32(num_entries),
		.page_address_list_addr = htobe64(dma.bus_addr),
		.page_size = htobe64(PAGE_SIZE),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	gve_dma_free_coherent(&dma);
	return (err);
}

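/* Ask the device to release a previously registered queue page list. */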
int
gve_adminq_unregister_page_list(struct gve_priv *priv, uint32_t page_list_id)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = htobe32(page_list_id),
	};

	return (gve_adminq_execute_cmd(priv, &cmd));
}

#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
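/*
 * Point the device at the shared counter array and irq doorbell region,
 * and tell it which queue format the driver has selected.
 */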
int
gve_adminq_configure_device_resources(struct gve_priv *priv)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};

	bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map,
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(priv->counter_array_mem.tag,
	    priv->counter_array_mem.map, BUS_DMASYNC_PREREAD);

	aq_cmd.opcode = htobe32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	aq_cmd.configure_device_resources =
	    (struct gve_adminq_configure_device_resources) {
		.counter_array = htobe64(priv->counter_array_mem.bus_addr),
		.irq_db_addr = htobe64(priv->irqs_db_mem.bus_addr),
		.num_counters = htobe32(priv->num_event_counters),
		.num_irq_dbs = htobe32(priv->num_queues),
		.irq_db_stride = htobe32(sizeof(struct gve_irq_db)),
		.ntfy_blk_msix_base_idx = htobe32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return (gve_adminq_execute_cmd(priv, &aq_cmd));
}

int
gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};

	aq_cmd.opcode = htobe32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);
	return (gve_adminq_execute_cmd(priv, &aq_cmd));
}

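/*
 * Hand the device a buffer of driver metadata for its
 * VerifyDriverCompatibility check.
 */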
int
gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
    uint64_t driver_info_len,
    vm_paddr_t driver_info_addr)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};

	aq_cmd.opcode = htobe32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	aq_cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = htobe64(driver_info_len),
		.driver_info_addr = htobe64(driver_info_addr),
	};

	return (gve_adminq_execute_cmd(priv, &aq_cmd));
}

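/*
 * Allocate (or reuse) the admin queue DMA region, reset the bookkeeping
 * counters, and give the device its base address. A no-op if the admin
 * queue is already up.
 */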
int
gve_adminq_alloc(struct gve_priv *priv)
{
	int rc;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK))
		return (0);

	if (priv->aq_mem.cpu_addr == NULL) {
		rc = gve_dma_alloc_coherent(priv, ADMINQ_SIZE, ADMINQ_SIZE,
		    &priv->aq_mem);
		if (rc != 0) {
			device_printf(priv->dev, "Failed to allocate admin queue mem\n");
			return (rc);
		}
	}

	priv->adminq = priv->aq_mem.cpu_addr;
	priv->adminq_bus_addr = priv->aq_mem.bus_addr;

	if (priv->adminq == NULL)
		return (ENOMEM);

	priv->adminq_mask = ADMINQ_SLOTS - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_verify_driver_compatibility_cnt = 0;

	gve_reg_bar_write_4(priv, GVE_REG_ADMINQ_ADDR,
	    priv->adminq_bus_addr / ADMINQ_SIZE);

	gve_set_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK);
	return (0);
}

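/*
 * Tell the device to release the admin queue, wait until it has done so,
 * and free the backing DMA memory.
 */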
void
gve_release_adminq(struct gve_priv *priv)
{
	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK))
		return;

	gve_reg_bar_write_4(priv, GVE_REG_ADMINQ_ADDR, 0);
	while (gve_reg_bar_read_4(priv, GVE_REG_ADMINQ_ADDR)) {
		device_printf(priv->dev, "Waiting until admin queue is released.\n");
		pause("gve release adminq", GVE_ADMINQ_SLEEP_LEN_MS);
	}

	gve_dma_free_coherent(&priv->aq_mem);
	priv->aq_mem = (struct gve_dma_handle){};
	priv->adminq = NULL;
	priv->adminq_bus_addr = 0;

	gve_clear_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK);

	if (bootverbose)
		device_printf(priv->dev, "Admin queue released\n");
}

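/*
 * Map a device status code onto an errno, logging any status that is
 * neither success nor unset.
 */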
static int
gve_adminq_parse_err(struct gve_priv *priv, uint32_t opcode, uint32_t status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		device_printf(priv->dev, "AQ command(%u): failed with status %d\n",
		    opcode, status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return (0);

	case GVE_ADMINQ_COMMAND_UNSET:
		device_printf(priv->dev,
		    "AQ command(%u): err and status both unset, this should not be possible.\n",
		    opcode);
		return (EINVAL);

	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return (EAGAIN);

	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return (EINVAL);

	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return (ETIMEDOUT);

	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return (EACCES);

	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return (ENOMEM);

	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return (EOPNOTSUPP);

	default:
		device_printf(priv->dev, "AQ command(%u): unknown status code %d\n",
		    opcode, status);
		return (EINVAL);
	}
}

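/* Ring the admin queue doorbell with the latest producer count. */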
static void
gve_adminq_kick_cmd(struct gve_priv *priv, uint32_t prod_cnt)
{
	gve_reg_bar_write_4(priv, ADMINQ_DOORBELL, prod_cnt);
}

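/*
 * Poll the admin queue event counter until it catches up with prod_cnt,
 * sleeping between reads; gives up after a fixed number of checks.
 */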
static bool
gve_adminq_wait_for_cmd(struct gve_priv *priv, uint32_t prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER) == prod_cnt)
			return (true);
		pause("gve adminq cmd", GVE_ADMINQ_SLEEP_LEN_MS);
	}

	return (false);
}

/*
 * Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int
gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	struct gve_adminq_command *cmd;
	uint32_t status;
	uint32_t tail, head;
	uint32_t opcode;
	int err;
	int i;

	tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		device_printf(priv->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return (ENOTRECOVERABLE);
	}
	bus_dmamap_sync(priv->aq_mem.tag, priv->aq_mem.map,
	    BUS_DMASYNC_POSTREAD);

	for (i = tail; i < head; i++) {
		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32toh(cmd->status);
		opcode = be32toh(cmd->opcode);
		err = gve_adminq_parse_err(priv, opcode, status);
		if (err != 0)
			return (err);
	}

	return (0);
}

/*
 * This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int
gve_adminq_issue_cmd(struct gve_priv *priv, struct gve_adminq_command *cmd_orig)
{
	struct gve_adminq_command *cmd;
	uint32_t opcode;
	uint32_t tail;
	int err;

	tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);

	/* Check if next command will overflow the buffer. */
	if ((priv->adminq_prod_cnt - tail) > priv->adminq_mask) {
		/* Flush existing commands to make room. */
		err = gve_adminq_kick_and_wait(priv);
		if (err != 0)
			return (err);

		/* Retry. */
		tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
		if ((priv->adminq_prod_cnt - tail) > priv->adminq_mask) {
			/*
			 * This should never happen. We just flushed the
			 * command queue so there should be enough space.
			 */
			return (ENOMEM);
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));

	bus_dmamap_sync(priv->aq_mem.tag, priv->aq_mem.map,
	    BUS_DMASYNC_PREWRITE);

	opcode = be32toh(cmd->opcode);

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;

	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;

	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;

	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;

	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;

	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;

	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;

	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;

	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;

	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;

	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;

	default:
		device_printf(priv->dev, "Unknown AQ command opcode %d\n", opcode);
	}

	return (0);
}

/*
 * This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int
gve_adminq_execute_cmd(struct gve_priv *priv, struct gve_adminq_command *cmd_orig)
{
	uint32_t tail, head;
	int err;

	tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
	head = priv->adminq_prod_cnt;

	if (tail != head)
		return (EINVAL);

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err != 0)
		return (err);

	return (gve_adminq_kick_and_wait(priv));
}