/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023-2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/endian.h>
#include <sys/socket.h>
#include <sys/time.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>

#include "gve.h"
#include "gve_adminq.h"

#define GVE_ADMINQ_SLEEP_LEN_MS 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 10
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1
#define GVE_REG_ADMINQ_ADDR 16
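/*
 * The admin queue lives in a single ADMINQ_SIZE DMA buffer carved into
 * fixed-size command slots, so the slot count follows from the command size.
 */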
#define ADMINQ_SLOTS (ADMINQ_SIZE / sizeof(struct gve_adminq_command))

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
    "Expected: length=%d, feature_mask=%x.\n" \
    "Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected." \
    " Possible older version of guest driver.\n"

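/*
 * Parse one device option from the device descriptor. If the option's length
 * and required feature mask match what this driver expects, save a pointer to
 * the option payload in the corresponding out-parameter.
 */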
static void
gve_parse_device_option(struct gve_priv *priv,
    struct gve_device_descriptor *device_descriptor,
    struct gve_device_option *option,
    struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
    struct gve_device_option_dqo_rda **dev_op_dqo_rda,
    struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
    struct gve_device_option_modify_ring **dev_op_modify_ring,
    struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	uint32_t req_feat_mask = be32toh(option->required_features_mask);
	uint16_t option_length = be16toh(option->option_length);
	uint16_t option_id = be16toh(option->option_id);

	/*
	 * If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
			    GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
			    "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;

	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
			    GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
			    "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;

	case GVE_DEV_OPT_ID_DQO_QPL:
		if (option_length < sizeof(**dev_op_dqo_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
			    GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_qpl)) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
			    "DQO QPL");
		}
		*dev_op_dqo_qpl = (void *)(option + 1);
		break;

	case GVE_DEV_OPT_ID_MODIFY_RING:
		if (option_length < (sizeof(**dev_op_modify_ring) -
		    sizeof(struct gve_ring_size_bound)) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "Modify Ring", (int)sizeof(**dev_op_modify_ring),
			    GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_modify_ring)) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
			    "Modify Ring");
		}
		*dev_op_modify_ring = (void *)(option + 1);

		/* Min ring size included; set the minimum ring size. */
		if (option_length == sizeof(**dev_op_modify_ring)) {
			priv->min_rx_desc_cnt = max(
			    be16toh((*dev_op_modify_ring)->min_ring_size.rx),
			    GVE_DEFAULT_MIN_RX_RING_SIZE);
			priv->min_tx_desc_cnt = max(
			    be16toh((*dev_op_modify_ring)->min_ring_size.tx),
			    GVE_DEFAULT_MIN_TX_RING_SIZE);
		}
		break;

	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "Jumbo Frames", (int)sizeof(**dev_op_jumbo_frames),
			    GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			device_printf(priv->dev,
			    GVE_DEVICE_OPTION_TOO_BIG_FMT, "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;

	default:
		/*
		 * If we don't recognize the option just continue
		 * without doing anything.
		 */
		device_printf(priv->dev, "Unrecognized device option 0x%hx not enabled.\n",
		    option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
    struct gve_device_descriptor *descriptor,
    struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
    struct gve_device_option_dqo_rda **dev_op_dqo_rda,
    struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
    struct gve_device_option_modify_ring **dev_op_modify_ring,
    struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	char *desc_end = (char *)descriptor + be16toh(descriptor->total_length);
	const int num_options = be16toh(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		if ((char *)(dev_opt + 1) > desc_end ||
		    (char *)(dev_opt + 1) + be16toh(dev_opt->option_length) > desc_end) {
			device_printf(priv->dev,
			    "options exceed device descriptor's total length.\n");
			return (EINVAL);
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
		    dev_op_gqi_qpl,
		    dev_op_dqo_rda,
		    dev_op_dqo_qpl,
		    dev_op_modify_ring,
		    dev_op_jumbo_frames);
		dev_opt = (void *)((char *)(dev_opt + 1) + be16toh(dev_opt->option_length));
	}

	return (0);
}

static int gve_adminq_execute_cmd(struct gve_priv *priv,
    struct gve_adminq_command *cmd);

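/* Post a DESTROY_TX_QUEUE command for the given queue id and wait for it. */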
static int
gve_adminq_destroy_tx_queue(struct gve_priv *priv, uint32_t id)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue.queue_id = htobe32(id);

	return (gve_adminq_execute_cmd(priv, &cmd));
}

static int
gve_adminq_destroy_rx_queue(struct gve_priv *priv, uint32_t id)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue.queue_id = htobe32(id);

	return (gve_adminq_execute_cmd(priv, &cmd));
}

int
gve_adminq_destroy_rx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err = 0;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to destroy rxq %d, err: %d\n",
			    i, err);
		}
	}

	if (err != 0)
		return (err);

	device_printf(priv->dev, "Destroyed %d rx queues\n", num_queues);
	return (0);
}

int
gve_adminq_destroy_tx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err = 0;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to destroy txq %d, err: %d\n",
			    i, err);
		}
	}

	if (err != 0)
		return (err);

	device_printf(priv->dev, "Destroyed %d tx queues\n", num_queues);
	return (0);
}

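/*
 * Build and execute a CREATE_RX_QUEUE command. GQI queues point the device
 * at a queue page list (QPL) for rx data, while DQO queues hand it a
 * completion ring and a buffer-posting ring instead.
 */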
static int
gve_adminq_create_rx_queue(struct gve_priv *priv, uint32_t queue_index)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	struct gve_dma_handle *qres_dma = &rx->com.q_resources_mem;

	bus_dmamap_sync(qres_dma->tag, qres_dma->map, BUS_DMASYNC_PREREAD);

	cmd.opcode = htobe32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = htobe32(queue_index),
		.ntfy_id = htobe32(rx->com.ntfy_id),
		.queue_resources_addr = htobe64(qres_dma->bus_addr),
		.rx_ring_size = htobe16(priv->rx_desc_cnt),
	};

	if (gve_is_gqi(priv)) {
		cmd.create_rx_queue.rx_desc_ring_addr =
		    htobe64(rx->desc_ring_mem.bus_addr);
		cmd.create_rx_queue.rx_data_ring_addr =
		    htobe64(rx->data_ring_mem.bus_addr);
		cmd.create_rx_queue.index =
		    htobe32(queue_index);
		cmd.create_rx_queue.queue_page_list_id =
		    htobe32((rx->com.qpl)->id);
		cmd.create_rx_queue.packet_buffer_size =
		    htobe16(GVE_DEFAULT_RX_BUFFER_SIZE);
	} else {
		cmd.create_rx_queue.queue_page_list_id =
		    htobe32(GVE_RAW_ADDRESSING_QPL_ID);
		cmd.create_rx_queue.rx_desc_ring_addr =
		    htobe64(rx->dqo.compl_ring_mem.bus_addr);
		cmd.create_rx_queue.rx_data_ring_addr =
		    htobe64(rx->desc_ring_mem.bus_addr);
		cmd.create_rx_queue.rx_buff_ring_size =
		    htobe16(priv->rx_desc_cnt);
		cmd.create_rx_queue.enable_rsc =
		    !!((if_getcapenable(priv->ifp) & IFCAP_LRO) &&
			!gve_disable_hw_lro);
		cmd.create_rx_queue.packet_buffer_size =
		    htobe16(priv->rx_buf_size_dqo);
	}

	return (gve_adminq_execute_cmd(priv, &cmd));
}

int
gve_adminq_create_rx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to create rxq %d, err: %d\n",
			    i, err);
			goto abort;
		}
	}

	if (bootverbose)
		device_printf(priv->dev, "Created %d rx queues\n", num_queues);
	return (0);

abort:
	gve_adminq_destroy_rx_queues(priv, i);
	return (err);
}

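/*
 * Build and execute a CREATE_TX_QUEUE command; as with rx queues, GQI
 * queues reference a QPL while DQO queues supply a completion ring.
 */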
static int
gve_adminq_create_tx_queue(struct gve_priv *priv, uint32_t queue_index)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	struct gve_dma_handle *qres_dma = &tx->com.q_resources_mem;

	bus_dmamap_sync(qres_dma->tag, qres_dma->map, BUS_DMASYNC_PREREAD);

	cmd.opcode = htobe32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = htobe32(queue_index),
		.queue_resources_addr = htobe64(qres_dma->bus_addr),
		.tx_ring_addr = htobe64(tx->desc_ring_mem.bus_addr),
		.ntfy_id = htobe32(tx->com.ntfy_id),
		.tx_ring_size = htobe16(priv->tx_desc_cnt),
	};

	if (gve_is_gqi(priv)) {
		cmd.create_tx_queue.queue_page_list_id =
		    htobe32((tx->com.qpl)->id);
	} else {
		cmd.create_tx_queue.queue_page_list_id =
		    htobe32(GVE_RAW_ADDRESSING_QPL_ID);
		cmd.create_tx_queue.tx_comp_ring_addr =
		    htobe64(tx->dqo.compl_ring_mem.bus_addr);
		cmd.create_tx_queue.tx_comp_ring_size =
		    htobe16(priv->tx_desc_cnt);
	}
	return (gve_adminq_execute_cmd(priv, &cmd));
}

int
gve_adminq_create_tx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to create txq %d, err: %d\n",
			    i, err);
			goto abort;
		}
	}

	if (bootverbose)
		device_printf(priv->dev, "Created %d tx queues\n", num_queues);
	return (0);

abort:
	gve_adminq_destroy_tx_queues(priv, i);
	return (err);
}

int
gve_adminq_set_mtu(struct gve_priv *priv, uint32_t mtu)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = htobe32(GVE_SET_PARAM_MTU),
		.parameter_value = htobe64(mtu),
	};

	return (gve_adminq_execute_cmd(priv, &cmd));
}

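/*
 * Enable optional features that the device both advertised via a device
 * option and listed in its supported-features mask.
 */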
static void
gve_enable_supported_features(struct gve_priv *priv,
    uint32_t supported_features_mask,
    const struct gve_device_option_modify_ring *dev_op_modify_ring,
    const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames)
{
	if (dev_op_modify_ring &&
	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
		if (bootverbose)
			device_printf(priv->dev, "MODIFY RING device option enabled.\n");
		priv->modify_ringsize_enabled = true;
		priv->max_rx_desc_cnt = be16toh(dev_op_modify_ring->max_ring_size.rx);
		priv->max_tx_desc_cnt = be16toh(dev_op_modify_ring->max_ring_size.tx);
	}

	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		if (bootverbose)
			device_printf(priv->dev, "JUMBO FRAMES device option enabled: %u.\n",
			    be16toh(dev_op_jumbo_frames->max_mtu));
		priv->max_mtu = be16toh(dev_op_jumbo_frames->max_mtu);
	}
}

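/*
 * Execute a DESCRIBE_DEVICE command and digest the returned descriptor:
 * choose a queue format, record ring sizes and limits, and enable any
 * optional features the device supports.
 */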
int
gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
	struct gve_device_descriptor *desc;
	struct gve_dma_handle desc_mem;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
	struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	uint32_t supported_features_mask = 0;
	int rc;
	int i;

	rc = gve_dma_alloc_coherent(priv, ADMINQ_SIZE, ADMINQ_SIZE, &desc_mem);
	if (rc != 0) {
		device_printf(priv->dev, "Failed to alloc DMA mem for DescribeDevice.\n");
		return (rc);
	}

	desc = desc_mem.cpu_addr;

	aq_cmd.opcode = htobe32(GVE_ADMINQ_DESCRIBE_DEVICE);
	aq_cmd.describe_device.device_descriptor_addr = htobe64(
	    desc_mem.bus_addr);
	aq_cmd.describe_device.device_descriptor_version = htobe32(
	    GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	aq_cmd.describe_device.available_length = htobe32(ADMINQ_SIZE);

	bus_dmamap_sync(desc_mem.tag, desc_mem.map, BUS_DMASYNC_PREWRITE);

	rc = gve_adminq_execute_cmd(priv, &aq_cmd);
	if (rc != 0)
		goto free_device_descriptor;

	bus_dmamap_sync(desc_mem.tag, desc_mem.map, BUS_DMASYNC_POSTREAD);

	/* Default min in case device options don't have min values */
	priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
	priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;

	rc = gve_process_device_options(priv, desc,
	    &dev_op_gqi_qpl,
	    &dev_op_dqo_rda,
	    &dev_op_dqo_qpl,
	    &dev_op_modify_ring,
	    &dev_op_jumbo_frames);
	if (rc != 0)
		goto free_device_descriptor;

	if (dev_op_dqo_rda != NULL) {
		snprintf(gve_queue_format, sizeof(gve_queue_format),
		    "%s", "DQO RDA");
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		supported_features_mask = be32toh(
		    dev_op_dqo_rda->supported_features_mask);
		if (bootverbose)
			device_printf(priv->dev,
			    "Driver is running with DQO RDA queue format.\n");
	} else if (dev_op_dqo_qpl != NULL) {
		snprintf(gve_queue_format, sizeof(gve_queue_format),
		    "%s", "DQO QPL");
		priv->queue_format = GVE_DQO_QPL_FORMAT;
		supported_features_mask = be32toh(
		    dev_op_dqo_qpl->supported_features_mask);
		if (bootverbose)
			device_printf(priv->dev,
			    "Driver is running with DQO QPL queue format.\n");
	} else if (dev_op_gqi_qpl != NULL) {
		snprintf(gve_queue_format, sizeof(gve_queue_format),
		    "%s", "GQI QPL");
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		supported_features_mask = be32toh(
		    dev_op_gqi_qpl->supported_features_mask);
		if (bootverbose)
			device_printf(priv->dev,
			    "Driver is running with GQI QPL queue format.\n");
	} else {
		device_printf(priv->dev, "No compatible queue formats\n");
		rc = EINVAL;
		goto free_device_descriptor;
	}

	priv->num_event_counters = be16toh(desc->counters);
	priv->default_num_queues = be16toh(desc->default_num_queues);
	priv->tx_desc_cnt = be16toh(desc->tx_queue_entries);
	priv->rx_desc_cnt = be16toh(desc->rx_queue_entries);
	priv->rx_pages_per_qpl = be16toh(desc->rx_pages_per_qpl);
	priv->max_registered_pages = be64toh(desc->max_registered_pages);
	priv->max_mtu = be16toh(desc->mtu);
	priv->supported_features = supported_features_mask;

	/* Default max to current in case modify ring size option is disabled */
	priv->max_rx_desc_cnt = priv->rx_desc_cnt;
	priv->max_tx_desc_cnt = priv->tx_desc_cnt;

	gve_enable_supported_features(priv, supported_features_mask,
	    dev_op_modify_ring, dev_op_jumbo_frames);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		priv->mac[i] = desc->mac[i];

free_device_descriptor:
	gve_dma_free_coherent(&desc_mem);

	return (rc);
}

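/*
 * Register a queue page list with the device. The page bus addresses are
 * staged, big-endian, in a temporary DMA buffer that the device reads while
 * executing the command.
 */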
int
gve_adminq_register_page_list(struct gve_priv *priv,
    struct gve_queue_page_list *qpl)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};
	uint32_t num_entries = qpl->num_pages;
	uint32_t size = num_entries * sizeof(qpl->dmas[0].bus_addr);
	__be64 *page_list;
	struct gve_dma_handle dma;
	int err;
	int i;

	err = gve_dma_alloc_coherent(priv, size, PAGE_SIZE, &dma);
	if (err != 0)
		return (ENOMEM);

	page_list = dma.cpu_addr;

	for (i = 0; i < num_entries; i++)
		page_list[i] = htobe64(qpl->dmas[i].bus_addr);

	bus_dmamap_sync(dma.tag, dma.map, BUS_DMASYNC_PREWRITE);

	cmd.opcode = htobe32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = htobe32(qpl->id),
		.num_pages = htobe32(num_entries),
		.page_address_list_addr = htobe64(dma.bus_addr),
		.page_size = htobe64(PAGE_SIZE),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	gve_dma_free_coherent(&dma);
	return (err);
}

int
gve_adminq_unregister_page_list(struct gve_priv *priv, uint32_t page_list_id)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = htobe32(page_list_id),
	};

	return (gve_adminq_execute_cmd(priv, &cmd));
}

#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
int
gve_adminq_configure_device_resources(struct gve_priv *priv)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};

	bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map,
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(priv->counter_array_mem.tag,
	    priv->counter_array_mem.map, BUS_DMASYNC_PREREAD);

	aq_cmd.opcode = htobe32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	aq_cmd.configure_device_resources =
	    (struct gve_adminq_configure_device_resources) {
		.counter_array = htobe64(priv->counter_array_mem.bus_addr),
		.irq_db_addr = htobe64(priv->irqs_db_mem.bus_addr),
		.num_counters = htobe32(priv->num_event_counters),
		.num_irq_dbs = htobe32(priv->num_queues),
		.irq_db_stride = htobe32(sizeof(struct gve_irq_db)),
		.ntfy_blk_msix_base_idx = htobe32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return (gve_adminq_execute_cmd(priv, &aq_cmd));
}

int
gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};

	aq_cmd.opcode = htobe32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);
	return (gve_adminq_execute_cmd(priv, &aq_cmd));
}

int
gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
    uint64_t driver_info_len,
    vm_paddr_t driver_info_addr)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};

	aq_cmd.opcode = htobe32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	aq_cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = htobe64(driver_info_len),
		.driver_info_addr = htobe64(driver_info_addr),
	};

	return (gve_adminq_execute_cmd(priv, &aq_cmd));
}

int
gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
    struct gve_ptype_lut *ptype_lut_dqo)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
	struct gve_ptype_map *ptype_map;
	struct gve_dma_handle dma;
	int err = 0;
	int i;

	err = gve_dma_alloc_coherent(priv, sizeof(*ptype_map), PAGE_SIZE, &dma);
	if (err)
		return (err);
	ptype_map = dma.cpu_addr;

	aq_cmd.opcode = htobe32(GVE_ADMINQ_GET_PTYPE_MAP);
	aq_cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = htobe64(sizeof(*ptype_map)),
		.ptype_map_addr = htobe64(dma.bus_addr),
	};

	err = gve_adminq_execute_cmd(priv, &aq_cmd);
	if (err)
		goto err;

	/* Populate ptype_lut_dqo. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut_dqo->ptypes[i].l3_type = ptype_map->ptypes[i].l3_type;
		ptype_lut_dqo->ptypes[i].l4_type = ptype_map->ptypes[i].l4_type;
	}
err:
	gve_dma_free_coherent(&dma);
	return (err);
}

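/*
 * Allocate (or reuse) the admin queue DMA region, reset the per-command
 * counters, and program the queue's base address into the device register.
 */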
int
gve_adminq_alloc(struct gve_priv *priv)
{
	int rc;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK))
		return (0);

	if (priv->aq_mem.cpu_addr == NULL) {
		rc = gve_dma_alloc_coherent(priv, ADMINQ_SIZE, ADMINQ_SIZE,
		    &priv->aq_mem);
		if (rc != 0) {
			device_printf(priv->dev, "Failed to allocate admin queue mem\n");
			return (rc);
		}
	}

	priv->adminq = priv->aq_mem.cpu_addr;
	priv->adminq_bus_addr = priv->aq_mem.bus_addr;

	if (priv->adminq == NULL)
		return (ENOMEM);

	priv->adminq_mask = ADMINQ_SLOTS - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	gve_reg_bar_write_4(priv, GVE_REG_ADMINQ_ADDR,
	    priv->adminq_bus_addr / ADMINQ_SIZE);

	gve_set_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK);
	return (0);
}

void
gve_release_adminq(struct gve_priv *priv)
{
	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK))
		return;

	gve_reg_bar_write_4(priv, GVE_REG_ADMINQ_ADDR, 0);
	while (gve_reg_bar_read_4(priv, GVE_REG_ADMINQ_ADDR)) {
		device_printf(priv->dev, "Waiting until admin queue is released.\n");
		pause("gve release adminq", GVE_ADMINQ_SLEEP_LEN_MS);
	}

	gve_dma_free_coherent(&priv->aq_mem);
	priv->aq_mem = (struct gve_dma_handle){};
	priv->adminq = NULL;
	priv->adminq_bus_addr = 0;

	gve_clear_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK);

	if (bootverbose)
		device_printf(priv->dev, "Admin queue released\n");
}

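/*
 * Translate a device-reported admin queue command status into an errno
 * value, counting failures along the way.
 */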
static int
gve_adminq_parse_err(struct gve_priv *priv, uint32_t opcode, uint32_t status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		device_printf(priv->dev, "AQ command(%u): failed with status %u\n",
		    opcode, status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return (0);

	case GVE_ADMINQ_COMMAND_UNSET:
		device_printf(priv->dev,
		    "AQ command(%u): err and status both unset, this should not be possible.\n",
		    opcode);
		return (EINVAL);

	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return (EAGAIN);

	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return (EINVAL);

	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return (ETIMEDOUT);

	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return (EACCES);

	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return (ENOMEM);

	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return (EOPNOTSUPP);

	default:
		device_printf(priv->dev, "AQ command(%u): unknown status code %u\n",
		    opcode, status);
		return (EINVAL);
	}
}

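/* Ring the admin queue doorbell so the device processes newly posted commands. */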
static void
gve_adminq_kick_cmd(struct gve_priv *priv, uint32_t prod_cnt)
{
	gve_reg_bar_write_4(priv, ADMINQ_DOORBELL, prod_cnt);
}

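/*
 * Poll the device's event counter until it catches up to prod_cnt or the
 * retry budget runs out.
 */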
static bool
gve_adminq_wait_for_cmd(struct gve_priv *priv, uint32_t prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER) == prod_cnt)
			return (true);
		pause("gve adminq cmd", GVE_ADMINQ_SLEEP_LEN_MS);
	}

	return (false);
}

/*
 * Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int
gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	struct gve_adminq_command *cmd;
	uint32_t tail, head;
	uint32_t status;
	uint32_t opcode;
	int err;
	int i;

	tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		device_printf(priv->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return (ENOTRECOVERABLE);
	}
	bus_dmamap_sync(
	    priv->aq_mem.tag, priv->aq_mem.map, BUS_DMASYNC_POSTREAD);

	for (i = tail; i < head; i++) {
		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32toh(cmd->status);
		opcode = be32toh(cmd->opcode);
		err = gve_adminq_parse_err(priv, opcode, status);
		if (err != 0)
			return (err);
	}

	return (0);
}

/*
 * This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int
gve_adminq_issue_cmd(struct gve_priv *priv, struct gve_adminq_command *cmd_orig)
{
	struct gve_adminq_command *cmd;
	uint32_t opcode;
	uint32_t tail;
	int err;

	tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);

	/* Check if next command will overflow the buffer. */
	if ((priv->adminq_prod_cnt - tail) > priv->adminq_mask) {
		/* Flush existing commands to make room. */
		err = gve_adminq_kick_and_wait(priv);
		if (err != 0)
			return (err);

		/* Retry. */
		tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
		if ((priv->adminq_prod_cnt - tail) > priv->adminq_mask) {
			/*
			 * This should never happen. We just flushed the
			 * command queue so there should be enough space.
			 */
			return (ENOMEM);
		}
	}

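	/*
	 * adminq_mask is ADMINQ_SLOTS - 1, so masking the free-running
	 * producer count wraps it onto a valid slot index.
	 */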
	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));

	bus_dmamap_sync(
	    priv->aq_mem.tag, priv->aq_mem.map, BUS_DMASYNC_PREWRITE);

	opcode = be32toh(cmd->opcode);

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;

	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;

	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;

	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;

	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;

	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;

	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;

	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;

	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;

	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;

	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;

	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;

	default:
		device_printf(priv->dev, "Unknown AQ command opcode %u\n", opcode);
	}

	return (0);
}

/*
 * This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int
gve_adminq_execute_cmd(struct gve_priv *priv, struct gve_adminq_command *cmd_orig)
{
	uint32_t tail, head;
	int err;

	tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
	head = priv->adminq_prod_cnt;

	if (tail != head)
		return (EINVAL);
	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err != 0)
		return (err);
	return (gve_adminq_kick_and_wait(priv));
}