xref: /freebsd/sys/dev/gve/gve_adminq.c (revision 2348ac893d10f06d2d84e1e4bd5ca9f1c5da92d8)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023-2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/endian.h>
#include <sys/socket.h>
#include <sys/time.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>

#include "gve.h"
#include "gve_adminq.h"

#define GVE_ADMINQ_SLEEP_LEN_MS 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 10
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1
#define GVE_REG_ADMINQ_ADDR 16
#define ADMINQ_SLOTS (ADMINQ_SIZE / sizeof(struct gve_adminq_command))

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
    "Expected: length=%d, feature_mask=%x.\n" \
    "Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected." \
    " Possible older version of guest driver.\n"

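/*
 * Parse a single device option from the DescribeDevice response and, if it is
 * recognized and well-formed, record a pointer to it in the corresponding
 * output argument.
 */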
static
void gve_parse_device_option(struct gve_priv *priv,
    struct gve_device_descriptor *device_descriptor,
    struct gve_device_option *option,
    struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
    struct gve_device_option_dqo_rda **dev_op_dqo_rda,
    struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
    struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	uint32_t req_feat_mask = be32toh(option->required_features_mask);
	uint16_t option_length = be16toh(option->option_length);
	uint16_t option_id = be16toh(option->option_id);

	/*
	 * If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
			    GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
			    "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;

	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
			    GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
			    "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;

	case GVE_DEV_OPT_ID_DQO_QPL:
		if (option_length < sizeof(**dev_op_dqo_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
			    GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_qpl)) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
			    "DQO QPL");
		}
		*dev_op_dqo_qpl = (void *)(option + 1);
		break;

	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
			    "Jumbo Frames", (int)sizeof(**dev_op_jumbo_frames),
			    GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
			    option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			device_printf(priv->dev,
			    GVE_DEVICE_OPTION_TOO_BIG_FMT, "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;

	default:
		/*
		 * If we don't recognize the option just continue
		 * without doing anything.
		 */
		device_printf(priv->dev, "Unrecognized device option 0x%hx not enabled.\n",
		    option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
    struct gve_device_descriptor *descriptor,
    struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
    struct gve_device_option_dqo_rda **dev_op_dqo_rda,
    struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
    struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	char *desc_end = (char *)descriptor + be16toh(descriptor->total_length);
	const int num_options = be16toh(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		if ((char *)(dev_opt + 1) > desc_end ||
		    (char *)(dev_opt + 1) + be16toh(dev_opt->option_length) > desc_end) {
			device_printf(priv->dev,
			    "options exceed device descriptor's total length.\n");
			return (EINVAL);
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
		    dev_op_gqi_qpl,
		    dev_op_dqo_rda,
		    dev_op_dqo_qpl,
		    dev_op_jumbo_frames);
		dev_opt = (void *)((char *)(dev_opt + 1) + be16toh(dev_opt->option_length));
	}

	return (0);
}

static int gve_adminq_execute_cmd(struct gve_priv *priv,
    struct gve_adminq_command *cmd);

static int
gve_adminq_destroy_tx_queue(struct gve_priv *priv, uint32_t id)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue.queue_id = htobe32(id);

	return (gve_adminq_execute_cmd(priv, &cmd));
}

static int
gve_adminq_destroy_rx_queue(struct gve_priv *priv, uint32_t id)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue.queue_id = htobe32(id);

	return (gve_adminq_execute_cmd(priv, &cmd));
}

int
gve_adminq_destroy_rx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err = 0;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to destroy rxq %d, err: %d\n",
			    i, err);
		}
	}

	if (err != 0)
		return (err);

	device_printf(priv->dev, "Destroyed %d rx queues\n", num_queues);
	return (0);
}

int
gve_adminq_destroy_tx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err = 0;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to destroy txq %d, err: %d\n",
			    i, err);
		}
	}

	if (err != 0)
		return (err);

	device_printf(priv->dev, "Destroyed %d tx queues\n", num_queues);
	return (0);
}

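/*
 * Issue a CreateRxQueue admin command for the given ring. For GQI the command
 * carries the descriptor and data ring addresses plus the ring's queue page
 * list; for DQO it uses raw addressing and carries the completion and buffer
 * ring addresses instead.
 */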
static int
gve_adminq_create_rx_queue(struct gve_priv *priv, uint32_t queue_index)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	struct gve_dma_handle *qres_dma = &rx->com.q_resources_mem;

	bus_dmamap_sync(qres_dma->tag, qres_dma->map, BUS_DMASYNC_PREREAD);

	cmd.opcode = htobe32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = htobe32(queue_index),
		.ntfy_id = htobe32(rx->com.ntfy_id),
		.queue_resources_addr = htobe64(qres_dma->bus_addr),
		.rx_ring_size = htobe16(priv->rx_desc_cnt),
		.packet_buffer_size = htobe16(GVE_DEFAULT_RX_BUFFER_SIZE),
	};

	if (gve_is_gqi(priv)) {
		cmd.create_rx_queue.rx_desc_ring_addr =
		    htobe64(rx->desc_ring_mem.bus_addr);
		cmd.create_rx_queue.rx_data_ring_addr =
		    htobe64(rx->data_ring_mem.bus_addr);
		cmd.create_rx_queue.index =
		    htobe32(queue_index);
		cmd.create_rx_queue.queue_page_list_id =
		    htobe32((rx->com.qpl)->id);
	} else {
		cmd.create_rx_queue.queue_page_list_id =
		    htobe32(GVE_RAW_ADDRESSING_QPL_ID);
		cmd.create_rx_queue.rx_desc_ring_addr =
		    htobe64(rx->dqo.compl_ring_mem.bus_addr);
		cmd.create_rx_queue.rx_data_ring_addr =
		    htobe64(rx->desc_ring_mem.bus_addr);
		cmd.create_rx_queue.rx_buff_ring_size =
		    htobe16(priv->rx_desc_cnt);
		cmd.create_rx_queue.enable_rsc =
		    !!((if_getcapenable(priv->ifp) & IFCAP_LRO) &&
			!gve_disable_hw_lro);
	}

	return (gve_adminq_execute_cmd(priv, &cmd));
}

int
gve_adminq_create_rx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to create rxq %d, err: %d\n",
			    i, err);
			goto abort;
		}
	}

	if (bootverbose)
		device_printf(priv->dev, "Created %d rx queues\n", num_queues);
	return (0);

abort:
	gve_adminq_destroy_rx_queues(priv, i);
	return (err);
}

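/*
 * Issue a CreateTxQueue admin command for the given ring. GQI queues reference
 * their queue page list; DQO queues use raw addressing and also supply the
 * completion ring address and size.
 */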
static int
gve_adminq_create_tx_queue(struct gve_priv *priv, uint32_t queue_index)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	struct gve_dma_handle *qres_dma = &tx->com.q_resources_mem;

	bus_dmamap_sync(qres_dma->tag, qres_dma->map, BUS_DMASYNC_PREREAD);

	cmd.opcode = htobe32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = htobe32(queue_index),
		.queue_resources_addr = htobe64(qres_dma->bus_addr),
		.tx_ring_addr = htobe64(tx->desc_ring_mem.bus_addr),
		.ntfy_id = htobe32(tx->com.ntfy_id),
		.tx_ring_size = htobe16(priv->tx_desc_cnt),
	};

	if (gve_is_gqi(priv)) {
		cmd.create_tx_queue.queue_page_list_id =
		    htobe32((tx->com.qpl)->id);
	} else {
		cmd.create_tx_queue.queue_page_list_id =
		    htobe32(GVE_RAW_ADDRESSING_QPL_ID);
		cmd.create_tx_queue.tx_comp_ring_addr =
		    htobe64(tx->dqo.compl_ring_mem.bus_addr);
		cmd.create_tx_queue.tx_comp_ring_size =
		    htobe16(priv->tx_desc_cnt);
	}
	return (gve_adminq_execute_cmd(priv, &cmd));
}

int
gve_adminq_create_tx_queues(struct gve_priv *priv, uint32_t num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err != 0) {
			device_printf(priv->dev, "Failed to create txq %d, err: %d\n",
			    i, err);
			goto abort;
		}
	}

	if (bootverbose)
		device_printf(priv->dev, "Created %d tx queues\n", num_queues);
	return (0);

abort:
	gve_adminq_destroy_tx_queues(priv, i);
	return (err);
}

int
gve_adminq_set_mtu(struct gve_priv *priv, uint32_t mtu)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = htobe32(GVE_SET_PARAM_MTU),
		.parameter_value = htobe64(mtu),
	};

	return (gve_adminq_execute_cmd(priv, &cmd));
}

static void
gve_enable_supported_features(struct gve_priv *priv,
    uint32_t supported_features_mask,
    const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames)
{
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		if (bootverbose)
			device_printf(priv->dev, "JUMBO FRAMES device option enabled: %u.\n",
			    be16toh(dev_op_jumbo_frames->max_mtu));
		priv->max_mtu = be16toh(dev_op_jumbo_frames->max_mtu);
	}
}

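/*
 * Run the DescribeDevice admin command: DMA the device descriptor from the
 * device, pick a queue format based on the device options present, and cache
 * the advertised limits (ring sizes, MTU, MAC address, etc.) in priv.
 */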
int
gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
	struct gve_device_descriptor *desc;
	struct gve_dma_handle desc_mem;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	uint32_t supported_features_mask = 0;
	int rc;
	int i;

	rc = gve_dma_alloc_coherent(priv, ADMINQ_SIZE, ADMINQ_SIZE, &desc_mem);
	if (rc != 0) {
		device_printf(priv->dev, "Failed to alloc DMA mem for DescribeDevice.\n");
		return (rc);
	}

	desc = desc_mem.cpu_addr;

	aq_cmd.opcode = htobe32(GVE_ADMINQ_DESCRIBE_DEVICE);
	aq_cmd.describe_device.device_descriptor_addr = htobe64(
	    desc_mem.bus_addr);
	aq_cmd.describe_device.device_descriptor_version = htobe32(
	    GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	aq_cmd.describe_device.available_length = htobe32(ADMINQ_SIZE);

	bus_dmamap_sync(desc_mem.tag, desc_mem.map, BUS_DMASYNC_PREWRITE);

	rc = gve_adminq_execute_cmd(priv, &aq_cmd);
	if (rc != 0)
		goto free_device_descriptor;

	bus_dmamap_sync(desc_mem.tag, desc_mem.map, BUS_DMASYNC_POSTREAD);

	rc = gve_process_device_options(priv, desc,
	    &dev_op_gqi_qpl,
	    &dev_op_dqo_rda,
	    &dev_op_dqo_qpl,
	    &dev_op_jumbo_frames);
	if (rc != 0)
		goto free_device_descriptor;

	if (dev_op_dqo_rda != NULL) {
		snprintf(gve_queue_format, sizeof(gve_queue_format),
		    "%s", "DQO RDA");
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		supported_features_mask = be32toh(
		    dev_op_dqo_rda->supported_features_mask);
		if (bootverbose)
			device_printf(priv->dev,
			    "Driver is running with DQO RDA queue format.\n");
	} else if (dev_op_dqo_qpl != NULL) {
		snprintf(gve_queue_format, sizeof(gve_queue_format),
		    "%s", "DQO QPL");
		priv->queue_format = GVE_DQO_QPL_FORMAT;
		supported_features_mask = be32toh(
		    dev_op_dqo_qpl->supported_features_mask);
		if (bootverbose)
			device_printf(priv->dev,
			    "Driver is running with DQO QPL queue format.\n");
	} else if (dev_op_gqi_qpl != NULL) {
		snprintf(gve_queue_format, sizeof(gve_queue_format),
		    "%s", "GQI QPL");
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		supported_features_mask = be32toh(
		    dev_op_gqi_qpl->supported_features_mask);
		if (bootverbose)
			device_printf(priv->dev,
			    "Driver is running with GQI QPL queue format.\n");
	} else {
		device_printf(priv->dev, "No compatible queue formats\n");
		rc = EINVAL;
		goto free_device_descriptor;
	}

	priv->num_event_counters = be16toh(desc->counters);
	priv->default_num_queues = be16toh(desc->default_num_queues);
	priv->tx_desc_cnt = be16toh(desc->tx_queue_entries);
	priv->rx_desc_cnt = be16toh(desc->rx_queue_entries);
	priv->rx_pages_per_qpl = be16toh(desc->rx_pages_per_qpl);
	priv->max_registered_pages = be64toh(desc->max_registered_pages);
	priv->max_mtu = be16toh(desc->mtu);
	priv->supported_features = supported_features_mask;

	gve_enable_supported_features(priv, supported_features_mask,
	    dev_op_jumbo_frames);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		priv->mac[i] = desc->mac[i];

free_device_descriptor:
	gve_dma_free_coherent(&desc_mem);

	return (rc);
}

int
gve_adminq_register_page_list(struct gve_priv *priv,
    struct gve_queue_page_list *qpl)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};
	uint32_t num_entries = qpl->num_pages;
	uint32_t size = num_entries * sizeof(qpl->dmas[0].bus_addr);
	__be64 *page_list;
	struct gve_dma_handle dma;
	int err;
	int i;

	err = gve_dma_alloc_coherent(priv, size, PAGE_SIZE, &dma);
	if (err != 0)
		return (ENOMEM);

	page_list = dma.cpu_addr;

	for (i = 0; i < num_entries; i++)
		page_list[i] = htobe64(qpl->dmas[i].bus_addr);

	bus_dmamap_sync(dma.tag, dma.map, BUS_DMASYNC_PREWRITE);

	cmd.opcode = htobe32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = htobe32(qpl->id),
		.num_pages = htobe32(num_entries),
		.page_address_list_addr = htobe64(dma.bus_addr),
		.page_size = htobe64(PAGE_SIZE),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	gve_dma_free_coherent(&dma);
	return (err);
}

int
gve_adminq_unregister_page_list(struct gve_priv *priv, uint32_t page_list_id)
{
	struct gve_adminq_command cmd = (struct gve_adminq_command){};

	cmd.opcode = htobe32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = htobe32(page_list_id),
	};

	return (gve_adminq_execute_cmd(priv, &cmd));
}

#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
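/*
 * Hand the device the host resources it needs for operation: the event
 * counter array, the per-notify-block interrupt doorbells, and the queue
 * format chosen in gve_adminq_describe_device().
 */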
int
gve_adminq_configure_device_resources(struct gve_priv *priv)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};

	bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map,
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(priv->counter_array_mem.tag,
	    priv->counter_array_mem.map, BUS_DMASYNC_PREREAD);

	aq_cmd.opcode = htobe32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	aq_cmd.configure_device_resources =
	    (struct gve_adminq_configure_device_resources) {
		.counter_array = htobe64(priv->counter_array_mem.bus_addr),
		.irq_db_addr = htobe64(priv->irqs_db_mem.bus_addr),
		.num_counters = htobe32(priv->num_event_counters),
		.num_irq_dbs = htobe32(priv->num_queues),
		.irq_db_stride = htobe32(sizeof(struct gve_irq_db)),
		.ntfy_blk_msix_base_idx = htobe32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return (gve_adminq_execute_cmd(priv, &aq_cmd));
}

int
gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};

	aq_cmd.opcode = htobe32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);
	return (gve_adminq_execute_cmd(priv, &aq_cmd));
}

int
gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
    uint64_t driver_info_len,
    vm_paddr_t driver_info_addr)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};

	aq_cmd.opcode = htobe32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	aq_cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = htobe64(driver_info_len),
		.driver_info_addr = htobe64(driver_info_addr),
	};

	return (gve_adminq_execute_cmd(priv, &aq_cmd));
}

int
gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
    struct gve_ptype_lut *ptype_lut_dqo)
{
	struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
	struct gve_ptype_map *ptype_map;
	struct gve_dma_handle dma;
	int err = 0;
	int i;

	err = gve_dma_alloc_coherent(priv, sizeof(*ptype_map), PAGE_SIZE, &dma);
	if (err)
		return (err);
	ptype_map = dma.cpu_addr;

	aq_cmd.opcode = htobe32(GVE_ADMINQ_GET_PTYPE_MAP);
	aq_cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = htobe64(sizeof(*ptype_map)),
		.ptype_map_addr = htobe64(dma.bus_addr),
	};

	err = gve_adminq_execute_cmd(priv, &aq_cmd);
	if (err)
		goto err;

	/* Populate ptype_lut_dqo. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut_dqo->ptypes[i].l3_type = ptype_map->ptypes[i].l3_type;
		ptype_lut_dqo->ptypes[i].l4_type = ptype_map->ptypes[i].l4_type;
	}
err:
	gve_dma_free_coherent(&dma);
	return (err);
}

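/*
 * Allocate (or reuse) the DMA memory backing the admin queue, reset the
 * producer count and the per-opcode statistics, and program the queue's
 * bus address (in ADMINQ_SIZE units) into the GVE_REG_ADMINQ_ADDR register.
 */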
int
gve_adminq_alloc(struct gve_priv *priv)
{
	int rc;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK))
		return (0);

	if (priv->aq_mem.cpu_addr == NULL) {
		rc = gve_dma_alloc_coherent(priv, ADMINQ_SIZE, ADMINQ_SIZE,
		    &priv->aq_mem);
		if (rc != 0) {
			device_printf(priv->dev, "Failed to allocate admin queue mem\n");
			return (rc);
		}
	}

	priv->adminq = priv->aq_mem.cpu_addr;
	priv->adminq_bus_addr = priv->aq_mem.bus_addr;

	if (priv->adminq == NULL)
		return (ENOMEM);

	priv->adminq_mask = ADMINQ_SLOTS - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	gve_reg_bar_write_4(priv, GVE_REG_ADMINQ_ADDR,
	    priv->adminq_bus_addr / ADMINQ_SIZE);

	gve_set_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK);
	return (0);
}

void
gve_release_adminq(struct gve_priv *priv)
{
	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK))
		return;

	gve_reg_bar_write_4(priv, GVE_REG_ADMINQ_ADDR, 0);
	while (gve_reg_bar_read_4(priv, GVE_REG_ADMINQ_ADDR)) {
		device_printf(priv->dev, "Waiting until admin queue is released.\n");
		pause("gve release adminq", GVE_ADMINQ_SLEEP_LEN_MS);
	}

	gve_dma_free_coherent(&priv->aq_mem);
	priv->aq_mem = (struct gve_dma_handle){};
	priv->adminq = NULL;
	priv->adminq_bus_addr = 0;

	gve_clear_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK);

	if (bootverbose)
		device_printf(priv->dev, "Admin queue released\n");
}

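/*
 * Translate an admin queue command status returned by the device into an
 * errno value, counting any failure in adminq_cmd_fail.
 */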
static int
gve_adminq_parse_err(struct gve_priv *priv, uint32_t opcode, uint32_t status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		device_printf(priv->dev, "AQ command(%u): failed with status %d\n", opcode, status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return (0);

	case GVE_ADMINQ_COMMAND_UNSET:
		device_printf(priv->dev,
		    "AQ command(%u): err and status both unset, this should not be possible.\n",
		    opcode);
		return (EINVAL);

	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return (EAGAIN);

	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return (EINVAL);

	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return (ETIMEDOUT);

	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return (EACCES);

	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return (ENOMEM);

	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return (EOPNOTSUPP);

	default:
		device_printf(priv->dev, "AQ command(%u): unknown status code %d\n",
		    opcode, status);
		return (EINVAL);
	}
}

static void
gve_adminq_kick_cmd(struct gve_priv *priv, uint32_t prod_cnt)
{
	gve_reg_bar_write_4(priv, ADMINQ_DOORBELL, prod_cnt);
}

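/*
 * Poll the device's admin queue event counter until it reaches prod_cnt,
 * sleeping briefly between checks. Returns false if the counter never
 * catches up within the allowed number of checks.
 */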
static bool
gve_adminq_wait_for_cmd(struct gve_priv *priv, uint32_t prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER) == prod_cnt)
			return (true);
		pause("gve adminq cmd", GVE_ADMINQ_SLEEP_LEN_MS);
	}

	return (false);
}

/*
 * Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int
gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	struct gve_adminq_command *cmd;
	uint32_t status, err;
	uint32_t tail, head;
	uint32_t opcode;
	int i;

	tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		device_printf(priv->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return (ENOTRECOVERABLE);
	}
	bus_dmamap_sync(
	    priv->aq_mem.tag, priv->aq_mem.map, BUS_DMASYNC_POSTREAD);

	for (i = tail; i < head; i++) {
		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32toh(cmd->status);
		opcode = be32toh(cmd->opcode);
		err = gve_adminq_parse_err(priv, opcode, status);
		if (err != 0)
			return (err);
	}

	return (0);
}

/*
 * This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int
gve_adminq_issue_cmd(struct gve_priv *priv, struct gve_adminq_command *cmd_orig)
{
	struct gve_adminq_command *cmd;
	uint32_t opcode;
	uint32_t tail;
	int err;

	tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);

	/* Check if next command will overflow the buffer. */
	if ((priv->adminq_prod_cnt - tail) > priv->adminq_mask) {
		/* Flush existing commands to make room. */
		err = gve_adminq_kick_and_wait(priv);
		if (err != 0)
			return (err);

		/* Retry. */
		tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
		if ((priv->adminq_prod_cnt - tail) > priv->adminq_mask) {
			/*
			 * This should never happen. We just flushed the
			 * command queue so there should be enough space.
			 */
			return (ENOMEM);
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));

	bus_dmamap_sync(
	    priv->aq_mem.tag, priv->aq_mem.map, BUS_DMASYNC_PREWRITE);

	opcode = be32toh(cmd->opcode);

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;

	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;

	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;

	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;

	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;

	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;

	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;

	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;

	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;

	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;

	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;

	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;

	default:
		device_printf(priv->dev, "Unknown AQ command opcode %d\n", opcode);
	}

	return (0);
}

/*
 * This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int
gve_adminq_execute_cmd(struct gve_priv *priv, struct gve_adminq_command *cmd_orig)
{
	uint32_t tail, head;
	int err;

	tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
	head = priv->adminq_prod_cnt;

	if (tail != head)
		return (EINVAL);
	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err != 0)
		return (err);
	return (gve_adminq_kick_and_wait(priv));
}
939