/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2024 Oxide Computer Company
 */

/*
 * This file contains everything having to do with the admin queue:
 * the mechanism used to send commands to, and read responses from,
 * the device.
 */

#include "ena_hw.h"
#include "ena.h"

/*
 * Mark the context as complete (a response has been received).
 */
static void
ena_complete_cmd_ctx(ena_cmd_ctx_t *ctx, enahw_resp_desc_t *hwresp)
{
	bcopy(hwresp, ctx->ectx_resp, sizeof (*hwresp));
	ctx->ectx_pending = B_FALSE;
}

/*
 * Reset and release the context back to the free list.
 */
static void
ena_release_cmd_ctx(ena_t *ena, ena_cmd_ctx_t *ctx)
{
	ASSERT(ctx->ectx_pending == B_FALSE);
	ctx->ectx_resp = NULL;
	ctx->ectx_cmd_opcode = ENAHW_CMD_NONE;

	mutex_enter(&ena->ena_aq.ea_sq_lock);
	/*
	 * We return the freed context to the tail of the list so that we
	 * cycle through the entries with each admin command, rather than
	 * almost always re-using the same entry (and thus the same command
	 * ID). While the controller does not appear to mind, re-use is a
	 * little counterintuitive.
	 */
	list_insert_tail(&ena->ena_aq.ea_cmd_ctxs_free, ctx);
	ena->ena_aq.ea_pending_cmds--;
	mutex_exit(&ena->ena_aq.ea_sq_lock);
}

/*
 * Acquire the next available command context.
 */
static ena_cmd_ctx_t *
ena_acquire_cmd_ctx(ena_adminq_t *aq)
{
	VERIFY(MUTEX_HELD(&aq->ea_sq_lock));
	ASSERT3U(aq->ea_pending_cmds, <, aq->ea_qlen);
	ena_cmd_ctx_t *ctx = list_remove_head(&aq->ea_cmd_ctxs_free);

	ctx->ectx_pending = B_TRUE;
	return (ctx);
}

/*
 * Submit a command to the admin queue.
 */
int
ena_admin_submit_cmd(ena_t *ena, enahw_cmd_desc_t *cmd, enahw_resp_desc_t *resp,
    ena_cmd_ctx_t **ctx)
{
	VERIFY3U(cmd->ecd_opcode, !=, 0);
	ena_adminq_t *aq = &ena->ena_aq;
	ena_admin_sq_t *sq = &aq->ea_sq;
	uint16_t modulo_mask = aq->ea_qlen - 1;
	ena_cmd_ctx_t *lctx = NULL;

	mutex_enter(&aq->ea_sq_lock);
	uint16_t tail_mod = sq->eas_tail & modulo_mask;

	if (aq->ea_pending_cmds >= aq->ea_qlen) {
		mutex_enter(&aq->ea_stat_lock);
		aq->ea_stats.queue_full++;
		mutex_exit(&aq->ea_stat_lock);
		mutex_exit(&aq->ea_sq_lock);
		return (ENOSPC);
	}

	lctx = ena_acquire_cmd_ctx(aq);
	lctx->ectx_cmd_opcode = cmd->ecd_opcode;
	lctx->ectx_resp = resp;

	cmd->ecd_flags = sq->eas_phase & ENAHW_CMD_PHASE_MASK;
	ENAHW_CMD_ID(cmd, lctx->ectx_id);
	bcopy(cmd, &sq->eas_entries[tail_mod], sizeof (*cmd));
	ENA_DMA_SYNC(sq->eas_dma, DDI_DMA_SYNC_FORDEV);
	sq->eas_tail++;
	aq->ea_pending_cmds++;

	mutex_enter(&aq->ea_stat_lock);
	aq->ea_stats.cmds_submitted++;
	mutex_exit(&aq->ea_stat_lock);

	DTRACE_PROBE4(cmd__submit, enahw_cmd_desc_t *, cmd, ena_cmd_ctx_t *,
	    lctx, uint16_t, tail_mod, uint8_t, sq->eas_phase);

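	/*
	 * Each time the tail pointer wraps around the ring, the phase bit
	 * flips. The device uses the phase of each descriptor to
	 * distinguish newly submitted entries from stale ones left over
	 * from the previous pass around the ring.
	 */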
	if ((sq->eas_tail & modulo_mask) == 0) {
		sq->eas_phase ^= 1;
	}

	ena_hw_abs_write32(ena, sq->eas_dbaddr, sq->eas_tail);
	mutex_exit(&aq->ea_sq_lock);
	*ctx = lctx;
	return (0);
}

/*
 * Read a single response from the admin queue.
 */
static void
ena_admin_read_resp(ena_t *ena, enahw_resp_desc_t *hwresp)
{
	ena_adminq_t *aq = &ena->ena_aq;
	ena_admin_cq_t *cq = &aq->ea_cq;
	ena_cmd_ctx_t *ctx = NULL;
	uint16_t modulo_mask = aq->ea_qlen - 1;
	VERIFY(MUTEX_HELD(&aq->ea_cq_lock));

	uint16_t head_mod = cq->eac_head & modulo_mask;
	uint8_t phase = cq->eac_phase & ENAHW_RESP_PHASE_MASK;
	uint16_t cmd_id = ENAHW_RESP_CMD_ID(hwresp);
	ctx = &aq->ea_cmd_ctxs[cmd_id];
	ASSERT3U(ctx->ectx_id, ==, cmd_id);
	ena_complete_cmd_ctx(ctx, hwresp);

	if (hwresp->erd_status != ENAHW_RESP_SUCCESS) {
		mutex_enter(&aq->ea_stat_lock);
		aq->ea_stats.cmds_fail++;
		mutex_exit(&aq->ea_stat_lock);
		DTRACE_PROBE4(cmd__fail, enahw_resp_desc_t *, hwresp,
		    ena_cmd_ctx_t *, ctx, uint16_t, head_mod, uint8_t, phase);
		return;
	}

	DTRACE_PROBE4(cmd__success, enahw_resp_desc_t *, hwresp,
	    ena_cmd_ctx_t *, ctx, uint16_t, head_mod, uint8_t, phase);
	mutex_enter(&aq->ea_stat_lock);
	aq->ea_stats.cmds_success++;
	mutex_exit(&aq->ea_stat_lock);
}

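/*
 * Walk the admin completion queue, handing each response whose phase bit
 * matches the current expected phase to ena_admin_read_resp(). The CQ
 * head and phase are advanced as we go.
 */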
static void
ena_admin_process_responses(ena_t *ena)
{
	ena_adminq_t *aq = &ena->ena_aq;
	ena_admin_cq_t *cq = &aq->ea_cq;
	uint16_t modulo_mask = aq->ea_qlen - 1;
	enahw_resp_desc_t *hwresp;

	mutex_enter(&aq->ea_cq_lock);
	uint16_t head_mod = cq->eac_head & modulo_mask;
	uint8_t phase = cq->eac_phase & ENAHW_RESP_PHASE_MASK;

	ENA_DMA_SYNC(cq->eac_dma, DDI_DMA_SYNC_FORKERNEL);
	hwresp = &cq->eac_entries[head_mod];
	while ((hwresp->erd_flags & ENAHW_RESP_PHASE_MASK) == phase) {
		ena_admin_read_resp(ena, hwresp);

		cq->eac_head++;
		head_mod = cq->eac_head & modulo_mask;

		if (head_mod == 0) {
			phase ^= 1;
		}

		hwresp = &cq->eac_entries[head_mod];
	}

	cq->eac_phase = phase;
	mutex_exit(&aq->ea_cq_lock);
}

/*
 * Wait for the command described by ctx to complete by polling for
 * status updates.
 */
int
ena_admin_poll_for_resp(ena_t *ena, ena_cmd_ctx_t *ctx)
{
	int ret = 0;
	hrtime_t expire = gethrtime() + ena->ena_aq.ea_cmd_timeout_ns;

	while (1) {
		ena_admin_process_responses(ena);

		if (!ctx->ectx_pending) {
			break;
		}

		/* Wait for 1 millisecond. */
		delay(drv_usectohz(1000));

		if (gethrtime() > expire) {
			/*
			 * We have no visibility into the device to
			 * confirm it is making progress on this
			 * command. At this point the driver and
			 * device cannot agree on the state of the
			 * world: perhaps the device is still making
			 * progress but not fast enough, perhaps the
			 * device completed the command but there was
			 * a failure to deliver the reply, perhaps the
			 * command failed but once again the reply was
			 * not delivered. With this unknown state the
			 * best thing to do is to reset the device and
			 * start from scratch. But as we don't have
			 * that capability at the moment the next best
			 * thing to do is to spin or panic; we choose
			 * to panic.
			 */
			dev_err(ena->ena_dip, CE_PANIC,
			    "timed out waiting for admin response");
		}
	}

	ret = enahw_resp_status_to_errno(ena, ctx->ectx_resp->erd_status);
	ena_release_cmd_ctx(ena, ctx);
	return (ret);
}

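/*
 * Free the host info DMA buffer allocated by ena_init_host_info().
 */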
void
ena_free_host_info(ena_t *ena)
{
	ena_dma_free(&ena->ena_host_info);
}

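/*
 * Allocate a DMA buffer for the host info structure, fill it in with
 * details about this driver and host (OS type, kernel version, driver
 * version, number of CPUs, supported features), and hand it to the
 * device via the host attributes Set Feature command.
 */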
boolean_t
ena_init_host_info(ena_t *ena)
{
	enahw_host_info_t *ehi;
	int ret = 0;
	int *regs;
	uint_t nregs;
	ena_dma_buf_t *hi_dma;
	enahw_cmd_desc_t cmd;
	enahw_feat_host_attr_t *ha_cmd =
	    &cmd.ecd_cmd.ecd_set_feat.ecsf_feat.ecsf_host_attr;
	enahw_resp_desc_t resp;
	ena_dma_conf_t conf = {
		.edc_size = ENAHW_HOST_INFO_ALLOC_SZ,
		.edc_align = ENAHW_HOST_INFO_ALIGNMENT,
		.edc_sgl = 1,
		.edc_endian = DDI_NEVERSWAP_ACC,
		.edc_stream = B_FALSE,
	};

	hi_dma = &ena->ena_host_info;

	if (!ena_dma_alloc(ena, hi_dma, &conf, 4096)) {
		ena_err(ena, "failed to allocate DMA for host info");
		return (B_FALSE);
	}

	ehi = (void *)hi_dma->edb_va;
	ehi->ehi_ena_spec_version =
	    ((ENA_SPEC_VERSION_MAJOR << ENAHW_HOST_INFO_SPEC_MAJOR_SHIFT) |
	    (ENA_SPEC_VERSION_MINOR));

	ehi->ehi_bdf = 0;
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, ena->ena_dip,
	    DDI_PROP_DONTPASS, "reg", &regs, &nregs) == DDI_PROP_SUCCESS) {
		if (nregs != 0) {
			ehi->ehi_bdf |= PCI_REG_BUS_G(regs[0]) << 8;
			ehi->ehi_bdf |= PCI_REG_DEV_G(regs[0]) << 3;
			ehi->ehi_bdf |= PCI_REG_FUNC_G(regs[0]);
		}

		ddi_prop_free(regs);
	}

	/*
	 * There is no illumos OS type; it would be nice to ping
	 * someone at Amazon and see if we can get one added.
	 */
	ehi->ehi_os_type = ENAHW_OS_FREEBSD;
	ehi->ehi_kernel_ver = 511; /* If you know you know */
	(void) strlcpy((char *)ehi->ehi_kernel_ver_str, utsname.version,
	    sizeof (ehi->ehi_kernel_ver_str));
	ehi->ehi_os_dist = 0;	/* What everyone else does. */
	ehi->ehi_driver_ver =
	    (ENA_MODULE_VER_MAJOR) |
	    (ENA_MODULE_VER_MINOR << ENAHW_HOST_INFO_MINOR_SHIFT) |
	    (ENA_MODULE_VER_SUBMINOR << ENAHW_HOST_INFO_SUB_MINOR_SHIFT);
	ehi->ehi_num_cpus = ncpus_online;

	/*
	 * ENA devices are not created equal. Some will support
	 * features not found in others. This field tells the device
	 * which features the driver supports.
	 *
	 * ENAHW_HOST_INFO_RX_OFFSET
	 *
	 *    Some ENA devices will write the frame data at an offset
	 *    in the buffer, presumably for alignment purposes. We
	 *    support this feature for the sole reason that the Linux
	 *    driver does as well.
	 *
	 * ENAHW_HOST_INFO_INTERRUPT_MODERATION
	 *
	 *    Based on the Linux history this flag indicates that the
	 *    driver "supports interrupt moderation properly". What
	 *    that means is anyone's guess. The Linux driver seems to
	 *    have some "adaptive" interrupt moderation, so perhaps
	 *    it's that? In any case, FreeBSD doesn't bother with
	 *    setting this flag, so we'll leave it be for now as well.
	 *
	 *    If you're curious to know if the device supports
	 *    interrupt moderation: the FEAT_INTERRUPT_MODERATION flag
	 *    will be set in ena_hw.eh_supported_features.
	 *
	 * ENAHW_HOST_INFO_RX_BUF_MIRRORING
	 *
	 *    Support traffic mirroring by allowing the hypervisor to
	 *    read the buffer memory directly. This probably has to do
	 *    with AWS flow logs, allowing more efficient mirroring.
	 *    But it's hard to say for sure given we only have the
	 *    Linux commit log to go off of. In any case, the only
	 *    requirement for this feature is that the Rx DMA buffers
	 *    be read/write, which they are.
	 *
	 * ENAHW_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY
	 *
	 *    The device supports retrieving and updating the RSS hash
	 *    function and hash key. As we don't yet implement RSS,
	 *    this is left disabled.
	 */
	ehi->ehi_driver_supported_features =
	    ENAHW_HOST_INFO_RX_OFFSET_MASK |
	    ENAHW_HOST_INFO_RX_BUF_MIRRORING_MASK;

	ENA_DMA_SYNC(*hi_dma, DDI_DMA_SYNC_FORDEV);
	bzero(&cmd, sizeof (cmd));
	ena_set_dma_addr(ena, hi_dma->edb_cookie->dmac_laddress,
	    &ha_cmd->efha_os_addr);

	/*
	 * You might notice the "debug area" is not allocated or
	 * configured; that is on purpose.
	 *
	 * The "debug area" is a region of host memory that contains
	 * the String Set (SS) tables used to report statistics to
	 * tools like ethtool (on Linux). This table consists of one
	 * or more entries, each a 32-byte string (the name of the
	 * statistic) along with its associated 64-bit value. The
	 * stats reported here include both the host-side stats and
	 * the device-reported stats (ENAHW_GET_STATS_TYPE_ENI). I
	 * believe the reason for calling it the "debug area" is that
	 * it can be accessed from outside of the guest, allowing an
	 * AWS user (?) or Amazon employee to get basic information
	 * about the state of the device from the guest's point of
	 * view.
	 *
	 * In the fullness of time, our driver should probably support
	 * this aspect of ENA. For the time being, all testing
	 * indicates the driver and device function fine without it.
	 */

	ret = ena_set_feature(ena, &cmd, &resp, ENAHW_FEAT_HOST_ATTR_CONFIG,
	    ENAHW_FEAT_HOST_ATTR_CONFIG_VER);
	if (ret != 0) {
		ena_err(ena, "failed to set host attributes: %d", ret);
		ena_dma_free(hi_dma);
		return (B_FALSE);
	}

	return (B_TRUE);
}

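/*
 * Issue a Create CQ admin command for a completion queue of num_descs
 * entries backed by the memory at phys_addr and bound to the given
 * MSI-X vector. On success, return the hardware CQ index along with
 * the addresses of the interrupt unmask register and (if the device
 * provides one) the NUMA node register.
 */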
int
ena_create_cq(ena_t *ena, uint16_t num_descs, uint64_t phys_addr,
    boolean_t is_tx, uint32_t vector, uint16_t *hw_index,
    uint32_t **unmask_addr, uint32_t **numanode)
{
	int ret;
	enahw_cmd_desc_t cmd;
	enahw_cmd_create_cq_t *cmd_cq = &cmd.ecd_cmd.ecd_create_cq;
	enahw_resp_desc_t resp;
	enahw_resp_create_cq_t *resp_cq = &resp.erd_resp.erd_create_cq;
	ena_cmd_ctx_t *ctx = NULL;
	uint8_t desc_size = is_tx ? sizeof (enahw_tx_cdesc_t) :
	    sizeof (enahw_rx_cdesc_t);

	bzero(&cmd, sizeof (cmd));
	bzero(&resp, sizeof (resp));

	cmd.ecd_opcode = ENAHW_CMD_CREATE_CQ;
	ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLE(cmd_cq);
	ASSERT3U(desc_size % 4, ==, 0);
	ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS(cmd_cq, desc_size / 4);
	cmd_cq->ecq_num_descs = num_descs;
	cmd_cq->ecq_msix_vector = vector;
	ena_set_dma_addr(ena, phys_addr, &cmd_cq->ecq_addr);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Create CQ command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Create CQ: %d", ret);
		return (ret);
	}

	*hw_index = resp_cq->ercq_idx;
	*unmask_addr = (uint32_t *)(ena->ena_reg_base +
	    resp_cq->ercq_interrupt_mask_reg_offset);

	if (resp_cq->ercq_numa_node_reg_offset != 0) {
		*numanode = (uint32_t *)(ena->ena_reg_base +
		    resp_cq->ercq_numa_node_reg_offset);
	} else {
		*numanode = NULL;
	}

	/*
	 * The CQ head doorbell register is no longer supported by any
	 * existing adapter hardware.
	 */
	VERIFY0(resp_cq->ercq_head_db_reg_offset);

	return (0);
}

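/*
 * Issue a Destroy CQ admin command for the completion queue with the
 * given hardware index.
 */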
int
ena_destroy_cq(ena_t *ena, uint16_t hw_idx)
{
	enahw_cmd_desc_t cmd;
	enahw_resp_desc_t resp;
	ena_cmd_ctx_t *ctx = NULL;
	int ret;

	bzero(&cmd, sizeof (cmd));
	bzero(&resp, sizeof (resp));
	cmd.ecd_opcode = ENAHW_CMD_DESTROY_CQ;
	cmd.ecd_cmd.ecd_destroy_cq.edcq_idx = hw_idx;

	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Destroy CQ command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Destroy CQ: %d", ret);
		return (ret);
	}

	return (0);
}

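/*
 * Issue a Create SQ admin command for a Tx or Rx submission queue of
 * num_descs entries backed by the memory at phys_addr and attached to
 * the completion queue at cq_index. On success, return the hardware SQ
 * index and the address of its doorbell register.
 */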
int
ena_create_sq(ena_t *ena, uint16_t num_descs, uint64_t phys_addr,
    boolean_t is_tx, uint16_t cq_index, uint16_t *hw_index, uint32_t **db_addr)
{
	int ret;
	enahw_cmd_desc_t cmd;
	enahw_cmd_create_sq_t *cmd_sq = &cmd.ecd_cmd.ecd_create_sq;
	enahw_resp_desc_t resp;
	enahw_resp_create_sq_t *resp_sq = &resp.erd_resp.erd_create_sq;
	enahw_sq_direction_t dir =
	    is_tx ? ENAHW_SQ_DIRECTION_TX : ENAHW_SQ_DIRECTION_RX;
	ena_cmd_ctx_t *ctx = NULL;

	if (!ISP2(num_descs)) {
		ena_err(ena, "the number of descs must be a power of 2, but "
		    "is %d", num_descs);
		return (EINVAL);
	}

	bzero(&cmd, sizeof (cmd));
	bzero(&resp, sizeof (resp));
	cmd.ecd_opcode = ENAHW_CMD_CREATE_SQ;
	ENAHW_CMD_CREATE_SQ_DIR(cmd_sq, dir);
	ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY(cmd_sq,
	    ENAHW_PLACEMENT_POLICY_HOST);
	ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY(cmd_sq,
	    ENAHW_COMPLETION_POLICY_DESC);
	/*
	 * We limit all SQ descriptor rings to an SGL of 1, therefore
	 * they are always physically contiguous.
	 */
	ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG(cmd_sq);
	cmd_sq->ecsq_cq_idx = cq_index;
	cmd_sq->ecsq_num_descs = num_descs;

	/*
	 * If we ever use a non-host placement policy, then guard this
	 * code against placement type (this value should not be set
	 * for device placement).
	 */
	ena_set_dma_addr(ena, phys_addr, &cmd_sq->ecsq_base);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Create SQ command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Create SQ: %d", ret);
		return (ret);
	}

	*hw_index = resp_sq->ersq_idx;
	*db_addr = (uint32_t *)(ena->ena_reg_base +
	    resp_sq->ersq_db_reg_offset);
	return (0);
}

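/*
 * Issue a Destroy SQ admin command for the Tx or Rx submission queue
 * with the given hardware index.
 */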
int
ena_destroy_sq(ena_t *ena, uint16_t hw_idx, boolean_t is_tx)
{
	enahw_cmd_desc_t cmd;
	enahw_cmd_destroy_sq_t *cmd_sq = &cmd.ecd_cmd.ecd_destroy_sq;
	enahw_sq_direction_t dir =
	    is_tx ? ENAHW_SQ_DIRECTION_TX : ENAHW_SQ_DIRECTION_RX;
	enahw_resp_desc_t resp;
	ena_cmd_ctx_t *ctx = NULL;
	int ret;

	bzero(&cmd, sizeof (cmd));
	bzero(&resp, sizeof (resp));
	cmd.ecd_opcode = ENAHW_CMD_DESTROY_SQ;
	cmd_sq->edsq_idx = hw_idx;
	ENAHW_CMD_DESTROY_SQ_DIR(cmd_sq, dir);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Destroy SQ command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed Destroy SQ: %d", ret);
		return (ret);
	}

	return (0);
}

/*
 * Determine if a given feature is available on this device.
 */
static boolean_t
ena_is_feature_avail(ena_t *ena, const enahw_feature_id_t feat_id)
{
	VERIFY3U(feat_id, <=, ENAHW_FEAT_NUM);
	uint32_t mask = 1U << feat_id;

	/*
	 * The device attributes feature is always supported, as
	 * indicated by the common code.
	 */
	if (feat_id == ENAHW_FEAT_DEVICE_ATTRIBUTES) {
		return (B_TRUE);
	}

	return ((ena->ena_supported_features & mask) != 0);
}

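/*
 * Set a device feature. The caller provides the command descriptor with
 * the feature payload already filled in, along with a response
 * descriptor to receive the result; this function fills in the common
 * feature header, submits the command, and waits for its completion.
 */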
int
ena_set_feature(ena_t *ena, enahw_cmd_desc_t *cmd, enahw_resp_desc_t *resp,
    const enahw_feature_id_t feat_id, const uint8_t feat_ver)
{
	enahw_cmd_set_feat_t *cmd_sf = &cmd->ecd_cmd.ecd_set_feat;
	ena_cmd_ctx_t *ctx = NULL;
	int ret = 0;

	if (!ena_is_feature_avail(ena, feat_id)) {
		ena_err(ena, "attempted to set unsupported feature: 0x%x %d"
		    " (0x%x)", feat_id, feat_ver, ena->ena_supported_features);
		return (ENOTSUP);
	}

	cmd->ecd_opcode = ENAHW_CMD_SET_FEATURE;
	cmd_sf->ecsf_comm.efc_id = feat_id;
	cmd_sf->ecsf_comm.efc_version = feat_ver;
	cmd_sf->ecsf_comm.efc_flags = 0;

	if ((ret = ena_admin_submit_cmd(ena, cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Set Feature command: %d", ret);
		return (ret);
	}

	return (ena_admin_poll_for_resp(ena, ctx));
}

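/*
 * Get a device feature. The current value of the feature is written to
 * the caller-provided response descriptor.
 */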
int
ena_get_feature(ena_t *ena, enahw_resp_desc_t *resp,
    const enahw_feature_id_t feat_id, const uint8_t feat_ver)
{
	enahw_cmd_desc_t cmd;
	enahw_cmd_get_feat_t *cmd_gf = &cmd.ecd_cmd.ecd_get_feat;
	ena_cmd_ctx_t *ctx = NULL;
	int ret = 0;

	if (!ena_is_feature_avail(ena, feat_id)) {
		return (ENOTSUP);
	}

	bzero(&cmd, sizeof (cmd));
	cmd.ecd_opcode = ENAHW_CMD_GET_FEATURE;
	cmd_gf->ecgf_comm.efc_id = feat_id;
	cmd_gf->ecgf_comm.efc_version = feat_ver;
	ENAHW_GET_FEAT_FLAGS_GET_CURR_VAL(cmd_gf);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Get Feature command: %d", ret);
		return (ret);
	}

	return (ena_admin_poll_for_resp(ena, ctx));
}

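/*
 * Retrieve the device's basic (Ethernet) statistics into the
 * caller-provided response descriptor.
 */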
int
ena_admin_get_basic_stats(ena_t *ena, enahw_resp_desc_t *resp)
{
	int ret = 0;
	enahw_cmd_desc_t cmd;
	enahw_cmd_get_stats_t *cmd_stats = &cmd.ecd_cmd.ecd_get_stats;
	ena_cmd_ctx_t *ctx = NULL;

	bzero(&cmd, sizeof (cmd));
	bzero(resp, sizeof (*resp));
	cmd.ecd_opcode = ENAHW_CMD_GET_STATS;
	cmd_stats->ecgs_type = ENAHW_GET_STATS_TYPE_BASIC;
	cmd_stats->ecgs_scope = ENAHW_GET_STATS_SCOPE_ETH;
	cmd_stats->ecgs_device_id = ENAHW_CMD_GET_STATS_MY_DEVICE_ID;

	if ((ret = ena_admin_submit_cmd(ena, &cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Get Basic Stats command: %d",
		    ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Get Basic Stats: %d", ret);
		return (ret);
	}

	return (0);
}

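/*
 * Retrieve the device's ENI (Elastic Network Interface) statistics into
 * the caller-provided response descriptor.
 */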
int
ena_admin_get_eni_stats(ena_t *ena, enahw_resp_desc_t *resp)
{
	int ret = 0;
	enahw_cmd_desc_t cmd;
	enahw_cmd_get_stats_t *cmd_stats = &cmd.ecd_cmd.ecd_get_stats;
	ena_cmd_ctx_t *ctx = NULL;

	bzero(&cmd, sizeof (cmd));
	bzero(resp, sizeof (*resp));
	cmd.ecd_opcode = ENAHW_CMD_GET_STATS;
	cmd_stats->ecgs_type = ENAHW_GET_STATS_TYPE_ENI;
	cmd_stats->ecgs_scope = ENAHW_GET_STATS_SCOPE_ETH;
	cmd_stats->ecgs_device_id = ENAHW_CMD_GET_STATS_MY_DEVICE_ID;

	if ((ret = ena_admin_submit_cmd(ena, &cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Get ENI Stats command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Get ENI Stats: %d", ret);
		return (ret);
	}

	return (0);
}