xref: /illumos-gate/usr/src/uts/common/io/ena/ena_admin.c (revision cdd3e9a818787b4def17c9f707f435885ce0ed31)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2024 Oxide Computer Company
14  */
15 
16 /*
17  * This file contains everything having to do with communicating with
18  * the admin queue for sending commands to the device.
19  */
20 
21 #include "ena_hw.h"
22 #include "ena.h"
23 
24 /*
25  * Mark the context as complete (a response has been received).
26  */
27 static void
28 ena_complete_cmd_ctx(ena_cmd_ctx_t *ctx, enahw_resp_desc_t *hwresp)
29 {
30 	bcopy(hwresp, ctx->ectx_resp, sizeof (*hwresp));
31 	ctx->ectx_pending = false;
32 }
33 
34 static inline void
35 ena_reset_cmd_ctx(ena_cmd_ctx_t *ctx)
36 {
37 	ctx->ectx_pending = false;
38 	ctx->ectx_resp = NULL;
39 	ctx->ectx_cmd_opcode = ENAHW_CMD_NONE;
40 }
41 
42 /*
43  * Reset and release the context back to the free list.
44  */
45 static void
46 ena_release_cmd_ctx(ena_t *ena, ena_cmd_ctx_t *ctx)
47 {
48 	ASSERT(ctx->ectx_pending == false);
49 	ena_reset_cmd_ctx(ctx);
50 
51 	mutex_enter(&ena->ena_aq.ea_sq_lock);
52 	/*
53 	 * We return the free descriptor to the end of the list so that we
54 	 * cycle through them with each admin command, and don't end up almost
55 	 * always re-using the same entry with the same command ID. While the
56 	 * controller does not appear to mind, it's a little counter-intuitive.
57 	 */
58 	list_remove(&ena->ena_aq.ea_cmd_ctxs_used, ctx);
59 	list_insert_tail(&ena->ena_aq.ea_cmd_ctxs_free, ctx);
60 	ena->ena_aq.ea_pending_cmds--;
61 	mutex_exit(&ena->ena_aq.ea_sq_lock);
62 }
63 
64 void
65 ena_release_all_cmd_ctx(ena_t *ena)
66 {
67 	ena_adminq_t *aq = &ena->ena_aq;
68 	ena_cmd_ctx_t *ctx;
69 
70 	mutex_enter(&aq->ea_sq_lock);
71 	while ((ctx = list_remove_head(&aq->ea_cmd_ctxs_used)) != NULL) {
72 		ena_reset_cmd_ctx(ctx);
73 		list_insert_tail(&aq->ea_cmd_ctxs_free, ctx);
74 	}
75 	aq->ea_pending_cmds = 0;
76 	mutex_exit(&aq->ea_sq_lock);
77 }
78 
79 void
80 ena_create_cmd_ctx(ena_t *ena)
81 {
82 	ena_adminq_t *aq = &ena->ena_aq;
83 
84 	for (uint_t i = 0; i < aq->ea_qlen; i++) {
85 		ena_cmd_ctx_t *ctx = &aq->ea_cmd_ctxs[i];
86 
87 		ctx->ectx_id = i;
88 		ena_reset_cmd_ctx(ctx);
89 		list_insert_tail(&aq->ea_cmd_ctxs_free, ctx);
90 	}
91 }
92 
93 /*
94  * Acquire the next available command context.
95  */
96 static ena_cmd_ctx_t *
97 ena_acquire_cmd_ctx(ena_adminq_t *aq)
98 {
99 	VERIFY(MUTEX_HELD(&aq->ea_sq_lock));
100 	ASSERT3U(aq->ea_pending_cmds, <, aq->ea_qlen);
101 	ena_cmd_ctx_t *ctx = list_remove_head(&aq->ea_cmd_ctxs_free);
102 	list_insert_head(&aq->ea_cmd_ctxs_used, ctx);
103 
104 	ctx->ectx_pending = true;
105 	return (ctx);
106 }
107 
108 /*
109  * Submit a command to the admin queue.
110  */
111 int
112 ena_admin_submit_cmd(ena_t *ena, enahw_cmd_desc_t *cmd, enahw_resp_desc_t *resp,
113     ena_cmd_ctx_t **ctx)
114 {
115 	VERIFY3U(cmd->ecd_opcode, !=, 0);
116 	ena_adminq_t *aq = &ena->ena_aq;
117 	ena_admin_sq_t *sq = &aq->ea_sq;
118 	const uint16_t modulo_mask = aq->ea_qlen - 1;
119 	ena_cmd_ctx_t *lctx = NULL;
120 
121 	mutex_enter(&aq->ea_sq_lock);
122 	uint16_t tail_mod = sq->eas_tail & modulo_mask;
123 
124 	if (aq->ea_pending_cmds >= aq->ea_qlen) {
125 		mutex_enter(&aq->ea_stat_lock);
126 		aq->ea_stats.queue_full++;
127 		mutex_exit(&aq->ea_stat_lock);
128 		mutex_exit(&aq->ea_sq_lock);
129 		return (ENOSPC);
130 	}
131 
132 	lctx = ena_acquire_cmd_ctx(aq);
133 	lctx->ectx_cmd_opcode = cmd->ecd_opcode;
134 	lctx->ectx_resp = resp;
135 
136 	cmd->ecd_flags = sq->eas_phase & ENAHW_CMD_PHASE_MASK;
137 	ENAHW_CMD_ID(cmd, lctx->ectx_id);
138 	bcopy(cmd, &sq->eas_entries[tail_mod], sizeof (*cmd));
139 	ENA_DMA_SYNC(sq->eas_dma, DDI_DMA_SYNC_FORDEV);
140 
141 	sq->eas_tail++;
142 	aq->ea_pending_cmds++;
143 
144 	mutex_enter(&aq->ea_stat_lock);
145 	aq->ea_stats.cmds_submitted++;
146 	mutex_exit(&aq->ea_stat_lock);
147 
148 	DTRACE_PROBE4(cmd__submit, enahw_cmd_desc_t *, cmd, ena_cmd_ctx_t *,
149 	    lctx, uint16_t, tail_mod, uint8_t, sq->eas_phase);
150 
151 	if ((sq->eas_tail & modulo_mask) == 0) {
152 		sq->eas_phase ^= 1;
153 	}
154 
155 	ena_hw_abs_write32(ena, sq->eas_dbaddr, sq->eas_tail);
156 	mutex_exit(&aq->ea_sq_lock);
157 	*ctx = lctx;
158 	return (0);
159 }
160 
161 /*
162  * Read a single response from the admin queue.
163  */
164 static void
165 ena_admin_read_resp(ena_t *ena, enahw_resp_desc_t *hwresp)
166 {
167 	ena_adminq_t *aq = &ena->ena_aq;
168 	ena_admin_cq_t *cq = &aq->ea_cq;
169 	ena_cmd_ctx_t *ctx = NULL;
170 	uint16_t modulo_mask = aq->ea_qlen - 1;
171 
172 	VERIFY(MUTEX_HELD(&aq->ea_cq_lock));
173 
174 	uint16_t head_mod = cq->eac_head & modulo_mask;
175 	uint8_t phase = cq->eac_phase & ENAHW_RESP_PHASE_MASK;
176 	uint16_t cmd_id = ENAHW_RESP_CMD_ID(hwresp);
177 
178 	ctx = &aq->ea_cmd_ctxs[cmd_id];
179 
180 	ASSERT3U(ctx->ectx_id, ==, cmd_id);
181 	ena_complete_cmd_ctx(ctx, hwresp);
182 
183 	if (hwresp->erd_status != ENAHW_RESP_SUCCESS) {
184 		mutex_enter(&aq->ea_stat_lock);
185 		aq->ea_stats.cmds_fail++;
186 		mutex_exit(&aq->ea_stat_lock);
187 		DTRACE_PROBE4(cmd__fail, enahw_resp_desc_t *, hwresp,
188 		    ena_cmd_ctx_t *, ctx, uint16_t, head_mod, uint8_t, phase);
189 		return;
190 	}
191 
192 	DTRACE_PROBE4(cmd__success, enahw_resp_desc_t *, hwresp,
193 	    ena_cmd_ctx_t *, ctx, uint16_t, head_mod, uint8_t, phase);
194 	mutex_enter(&aq->ea_stat_lock);
195 	aq->ea_stats.cmds_success++;
196 	mutex_exit(&aq->ea_stat_lock);
197 }
198 
/*
 * Drain all responses currently available in the admin completion
 * queue, completing the corresponding command contexts.
 *
 * A completion descriptor belongs to the driver when its phase bit
 * matches the queue's current phase; the expected phase flips each
 * time the head index wraps around the ring.
 */
static void
ena_admin_process_responses(ena_t *ena)
{
	ena_adminq_t *aq = &ena->ena_aq;
	ena_admin_cq_t *cq = &aq->ea_cq;
	uint16_t modulo_mask = aq->ea_qlen - 1;
	enahw_resp_desc_t *hwresp;

	mutex_enter(&aq->ea_cq_lock);
	uint16_t head_mod = cq->eac_head & modulo_mask;
	uint8_t phase = cq->eac_phase & ENAHW_RESP_PHASE_MASK;

	/* Make the device's descriptor writes visible before reading. */
	ENA_DMA_SYNC(cq->eac_dma, DDI_DMA_SYNC_FORKERNEL);
	hwresp = &cq->eac_entries[head_mod];
	while ((hwresp->erd_flags & ENAHW_RESP_PHASE_MASK) == phase) {
		ena_admin_read_resp(ena, hwresp);

		cq->eac_head++;
		head_mod = cq->eac_head & modulo_mask;

		/* The expected phase flips when the head wraps. */
		if (head_mod == 0) {
			phase ^= 1;
		}

		hwresp = &cq->eac_entries[head_mod];
	}

	cq->eac_phase = phase;
	mutex_exit(&aq->ea_cq_lock);
}
229 
230 /*
231  * Wait for the command described by ctx to complete by polling for
232  * status updates.
233  */
234 int
235 ena_admin_poll_for_resp(ena_t *ena, ena_cmd_ctx_t *ctx)
236 {
237 	int ret = 0;
238 	hrtime_t expire = gethrtime() + ena->ena_aq.ea_cmd_timeout_ns;
239 
240 	for (;;) {
241 		ena_admin_process_responses(ena);
242 
243 		if (!ctx->ectx_pending) {
244 			break;
245 		}
246 
247 		/* Wait for 1 millisecond. */
248 		delay(drv_usectohz(1000));
249 
250 		if (gethrtime() > expire) {
251 			/*
252 			 * We have no visibility into the device to
253 			 * confirm it is making progress on this
254 			 * command. At this point the driver and
255 			 * device cannot agree on the state of the
256 			 * world: perhaps the device is still making
257 			 * progress but not fast enough, perhaps the
258 			 * device completed the command but there was
259 			 * a failure to deliver the reply, perhaps the
260 			 * command failed but once again the reply was
261 			 * not delivered. With this unknown state the
262 			 * best thing to do is to reset the device and
263 			 * start from scratch. There is even a reset
264 			 * reason code just for this.
265 			 */
266 			ena_err(ena, "timed out waiting for admin response");
267 			ena_trigger_reset(ena, ENAHW_RESET_ADMIN_TO);
268 			return (EIO);
269 		}
270 	}
271 
272 	ret = enahw_resp_status_to_errno(ena, ctx->ectx_resp->erd_status);
273 	ena_release_cmd_ctx(ena, ctx);
274 	return (ret);
275 }
276 
/*
 * Release the DMA buffer backing the host info structure.
 */
void
ena_free_host_info(ena_t *ena)
{
	ena_dma_free(&ena->ena_host_info);
}
282 
/*
 * Populate the "host info" structure and push it to the device via the
 * Set Feature (host attributes) admin command, allocating its DMA
 * buffer on first use. Host info identifies the driver to the device:
 * ENA spec version, PCI BDF, OS type/version, driver version, online
 * CPU count, and the set of optional device features this driver
 * supports.
 *
 * Returns true on success. If the Set Feature command fails, the DMA
 * buffer is freed and false is returned.
 */
bool
ena_init_host_info(ena_t *ena)
{
	enahw_host_info_t *ehi;
	int ret = 0;
	int *regs;
	uint_t nregs;
	ena_dma_buf_t *hi_dma;
	enahw_cmd_desc_t cmd;
	enahw_feat_host_attr_t *ha_cmd =
	    &cmd.ecd_cmd.ecd_set_feat.ecsf_feat.ecsf_host_attr;
	enahw_resp_desc_t resp;

	hi_dma = &ena->ena_host_info;

	/* Allocate the host info DMA buffer on first call only. */
	if (hi_dma->edb_va == NULL) {
		ena_dma_conf_t conf = {
			.edc_size = ENAHW_HOST_INFO_ALLOC_SZ,
			.edc_align = ENAHW_HOST_INFO_ALIGNMENT,
			.edc_sgl = 1,
			.edc_endian = DDI_NEVERSWAP_ACC,
			.edc_stream = false,
		};

		if (!ena_dma_alloc(ena, hi_dma, &conf, 4096)) {
			ena_err(ena, "failed to allocate DMA for host info");
			return (false);
		}
	}

	ehi = (void *)hi_dma->edb_va;
	ehi->ehi_ena_spec_version =
	    ((ENA_SPEC_VERSION_MAJOR << ENAHW_HOST_INFO_SPEC_MAJOR_SHIFT) |
	    (ENA_SPEC_VERSION_MINOR));

	/* Derive the PCI bus/device/function from our "reg" property. */
	ehi->ehi_bdf = 0;
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, ena->ena_dip,
	    DDI_PROP_DONTPASS, "reg", &regs, &nregs) == DDI_PROP_SUCCESS) {
		if (nregs != 0) {
			ehi->ehi_bdf |= PCI_REG_BUS_G(regs[0]) << 8;
			ehi->ehi_bdf |= PCI_REG_DEV_G(regs[0]) << 3;
			ehi->ehi_bdf |= PCI_REG_FUNC_G(regs[0]);
		}

		ddi_prop_free(regs);
	}

	/*
	 * There is no illumos OS type, it would be nice to ping
	 * someone at Amazon and see if we can't get one added.
	 */
	ehi->ehi_os_type = ENAHW_OS_FREEBSD;
	ehi->ehi_kernel_ver = 511; /* If you know you know */
	(void) strlcpy((char *)ehi->ehi_kernel_ver_str, utsname.version,
	    sizeof (ehi->ehi_kernel_ver_str));
	ehi->ehi_os_dist = 0;	/* What everyone else does. */
	ehi->ehi_driver_ver =
	    (ENA_MODULE_VER_MAJOR) |
	    (ENA_MODULE_VER_MINOR << ENAHW_HOST_INFO_MINOR_SHIFT) |
	    (ENA_MODULE_VER_SUBMINOR << ENAHW_HOST_INFO_SUB_MINOR_SHIFT);
	ehi->ehi_num_cpus = ncpus_online;

	/*
	 * ENA devices are not created equal. Some will support
	 * features not found in others. This field tells the device
	 * which features the driver supports.
	 *
	 * ENAHW_HOST_INFO_RX_OFFSET
	 *
	 *    Some ENA devices will write the frame data at an offset
	 *    in the buffer, presumably for alignment purposes. We
	 *    support this feature for the sole reason that the Linux
	 *    driver does as well.
	 *
	 * ENAHW_HOST_INFO_INTERRUPT_MODERATION
	 *
	 *    Based on the Linux history this flag indicates that the
	 *    driver "supports interrupt moderation properly". What
	 *    that means is anyone's guess. The Linux driver seems to
	 *    have some "adaptive" interrupt moderation, so perhaps
	 *    it's that? In any case, FreeBSD doesn't bother with
	 *    setting this flag, so we'll leave it be for now as well.
	 *
	 *    If you're curious to know if the device supports
	 *    interrupt moderation: the FEAT_INTERRUPT_MODERATION flag
	 *    will be set in ena_hw.eh_supported_features.
	 *
	 * ENAHW_HOST_INFO_RX_BUF_MIRRORING
	 *
	 *    Support traffic mirroring by allowing the hypervisor to
	 *    read the buffer memory directly. This probably has to do
	 *    with AWS flow logs, allowing more efficient mirroring.
	 *    But it's hard to say for sure given we only have the
	 *    Linux commit log to go off of. In any case, the only
	 *    requirement for this feature is that the Rx DMA buffers
	 *    be read/write, which they are.
	 *
	 * ENAHW_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY
	 *
	 *    The device supports the retrieving and updating of the
	 *    RSS function and hash key. As we don't yet implement RSS
	 *    this is disabled.
	 *
	 * ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE
	 *
	 *    Dynamic Rx Buffer feature. This feature allows the driver to
	 *    avoid additional Rx buffer allocations by effectively using a
	 *    buffer more than once if there is space remaining after receiving
	 *    a packet. We currently use fixed TCBs and rings and don't
	 *    implement this feature.
	 *
	 * ENA_ADMIN_HOST_INFO_TX_IPV6_CSUM_OFFLOAD
	 *
	 *    Indicate that the driver supports Tx IPv6 checksum offload.
	 *
	 * ENA_ADMIN_HOST_INFO_PHC
	 *
	 *    Instructs the device to enable its PHC (Precision Time Protocol
	 *    Hardware Clock). In Linux, this would be exposed to userland NTP
	 *    software as a PTP device. We don't support this so leave it
	 *    disabled.
	 */
	ehi->ehi_driver_supported_features =
	    ENAHW_HOST_INFO_RX_OFFSET_MASK |
	    ENAHW_HOST_INFO_RX_BUF_MIRRORING_MASK;

	/* Flush the populated host info before handing it to the device. */
	ENA_DMA_SYNC(*hi_dma, DDI_DMA_SYNC_FORDEV);
	bzero(&cmd, sizeof (cmd));
	/*
	 * NOTE(review): resp is left uninitialized here, unlike other
	 * command paths which bzero it; the completion path overwrites
	 * the entire structure via bcopy, so it is never read before
	 * being written.
	 */
	ena_set_dma_addr(ena, hi_dma->edb_cookie->dmac_laddress,
	    &ha_cmd->efha_os_addr);

	/*
	 * You might notice the "debug area" is not allocated or
	 * configured, that is on purpose.
	 *
	 * The "debug area" is a region of host memory that contains
	 * the String Set (SS) tables used to report statistics to
	 * tools like ethtool (on Linux). This table consists of one
	 * of more entries of a 32-byte string (the name of the
	 * statistic) along with its associated 64-bit value. The
	 * stats reported here contain both the host-side stats as
	 * well as device-reported stats (ENAHW_GET_STATS_TYPE_ENI). I
	 * believe the reason for calling it the "debug area" is that
	 * it can be accessed from outside of the guest, allowing an
	 * AWS user (?) or Amazon employee to get basic information
	 * about the state of the device from the guest's point of
	 * view.
	 *
	 * In the fullness of time, our driver should probably support
	 * this aspect of ENA. For the time being, all testing
	 * indicates the driver and device function fine without it.
	 */

	ret = ena_set_feature(ena, &cmd, &resp, ENAHW_FEAT_HOST_ATTR_CONFIG,
	    ENAHW_FEAT_HOST_ATTR_CONFIG_VER);
	if (ret != 0) {
		ena_err(ena, "failed to set host attributes: %d", ret);
		ena_dma_free(hi_dma);
		return (false);
	}

	return (true);
}
446 
/*
 * Create a completion queue (CQ) on the device.
 *
 * num_descs is the ring depth, phys_addr the DMA address of the
 * descriptor ring, is_tx selects the completion descriptor size, and
 * vector is the MSI-X vector the device should use for this CQ. On
 * success, the device-assigned queue index is returned in hw_index,
 * the interrupt unmask register address in unmask_addr, and — when the
 * device provides one — the NUMA node register address in numanode
 * (NULL otherwise).
 *
 * Returns 0 on success or an errno from command submission/completion.
 */
int
ena_create_cq(ena_t *ena, uint16_t num_descs, uint64_t phys_addr,
    bool is_tx, uint32_t vector, uint16_t *hw_index,
    uint32_t **unmask_addr, uint32_t **numanode)
{
	int ret;
	enahw_cmd_desc_t cmd;
	enahw_cmd_create_cq_t *cmd_cq = &cmd.ecd_cmd.ecd_create_cq;
	enahw_resp_desc_t resp;
	enahw_resp_create_cq_t *resp_cq = &resp.erd_resp.erd_create_cq;
	ena_cmd_ctx_t *ctx = NULL;
	uint8_t desc_size = is_tx ? sizeof (enahw_tx_cdesc_t) :
	    sizeof (enahw_rx_cdesc_t);

	bzero(&cmd, sizeof (cmd));
	bzero(&resp, sizeof (resp));

	cmd.ecd_opcode = ENAHW_CMD_CREATE_CQ;
	ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLE(cmd_cq);
	/* The device expresses descriptor size in 32-bit words. */
	ASSERT3U(desc_size % 4, ==, 0);
	ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS(cmd_cq, desc_size / 4);
	cmd_cq->ecq_num_descs = num_descs;
	cmd_cq->ecq_msix_vector = vector;
	ena_set_dma_addr(ena, phys_addr, &cmd_cq->ecq_addr);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Create CQ command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Create CQ: %d", ret);
		return (ret);
	}

	*hw_index = resp_cq->ercq_idx;
	*unmask_addr = (uint32_t *)(ena->ena_reg_base +
	    resp_cq->ercq_interrupt_mask_reg_offset);

	/* The NUMA node register is optional; the device may omit it. */
	if (resp_cq->ercq_numa_node_reg_offset != 0) {
		*numanode = (uint32_t *)(ena->ena_reg_base +
		    resp_cq->ercq_numa_node_reg_offset);
	} else {
		*numanode = NULL;
	}

	/*
	 * The CQ head doorbell register is no longer supported by any
	 * existing adapter hardware.
	 */
	VERIFY0(resp_cq->ercq_head_db_reg_offset);

	return (0);
}
501 
502 int
503 ena_destroy_cq(ena_t *ena, uint16_t hw_idx)
504 {
505 	enahw_cmd_desc_t cmd;
506 	enahw_resp_desc_t resp;
507 	ena_cmd_ctx_t *ctx = NULL;
508 	int ret;
509 
510 	bzero(&cmd, sizeof (cmd));
511 	bzero(&resp, sizeof (resp));
512 	cmd.ecd_opcode = ENAHW_CMD_DESTROY_CQ;
513 	cmd.ecd_cmd.ecd_destroy_cq.edcq_idx = hw_idx;
514 
515 	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
516 		ena_err(ena, "failed to submit Destroy CQ command: %d", ret);
517 		return (ret);
518 	}
519 
520 	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
521 		ena_err(ena, "failed to Destroy CQ: %d", ret);
522 		return (ret);
523 	}
524 
525 	return (0);
526 }
527 
528 int
529 ena_create_sq(ena_t *ena, uint16_t num_descs, uint64_t phys_addr,
530     bool is_tx, uint16_t cq_index, uint16_t *hw_index, uint32_t **db_addr)
531 {
532 	int ret;
533 	enahw_cmd_desc_t cmd;
534 	enahw_cmd_create_sq_t *cmd_sq = &cmd.ecd_cmd.ecd_create_sq;
535 	enahw_resp_desc_t resp;
536 	enahw_resp_create_sq_t *resp_sq = &resp.erd_resp.erd_create_sq;
537 	enahw_sq_direction_t dir =
538 	    is_tx ? ENAHW_SQ_DIRECTION_TX : ENAHW_SQ_DIRECTION_RX;
539 	ena_cmd_ctx_t *ctx = NULL;
540 
541 	if (!ISP2(num_descs)) {
542 		ena_err(ena, "the number of descs must be a power of 2, but "
543 		    " is %d", num_descs);
544 		return (false);
545 	}
546 
547 	bzero(&cmd, sizeof (cmd));
548 	bzero(&resp, sizeof (resp));
549 	cmd.ecd_opcode = ENAHW_CMD_CREATE_SQ;
550 	ENAHW_CMD_CREATE_SQ_DIR(cmd_sq, dir);
551 	ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY(cmd_sq,
552 	    ENAHW_PLACEMENT_POLICY_HOST);
553 	ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY(cmd_sq,
554 	    ENAHW_COMPLETION_POLICY_DESC);
555 	/*
556 	 * We limit all SQ descriptor rings to an SGL of 1, therefore
557 	 * they are always physically contiguous.
558 	 */
559 	ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG(cmd_sq);
560 	cmd_sq->ecsq_cq_idx = cq_index;
561 	cmd_sq->ecsq_num_descs = num_descs;
562 
563 	/*
564 	 * If we ever use a non-host placement policy, then guard this
565 	 * code against placement type (this value should not be set
566 	 * for device placement).
567 	 */
568 	ena_set_dma_addr(ena, phys_addr, &cmd_sq->ecsq_base);
569 
570 	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
571 		ena_err(ena, "failed to submit Create SQ command: %d", ret);
572 		return (ret);
573 	}
574 
575 	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
576 		ena_err(ena, "failed to Create SQ: %d", ret);
577 		return (ret);
578 	}
579 
580 	*hw_index = resp_sq->ersq_idx;
581 	*db_addr = (uint32_t *)(ena->ena_reg_base +
582 	    resp_sq->ersq_db_reg_offset);
583 	return (0);
584 }
585 
586 int
587 ena_destroy_sq(ena_t *ena, uint16_t hw_idx, bool is_tx)
588 {
589 	enahw_cmd_desc_t cmd;
590 	enahw_cmd_destroy_sq_t *cmd_sq = &cmd.ecd_cmd.ecd_destroy_sq;
591 	enahw_sq_direction_t dir =
592 	    is_tx ? ENAHW_SQ_DIRECTION_TX : ENAHW_SQ_DIRECTION_RX;
593 	enahw_resp_desc_t resp;
594 	ena_cmd_ctx_t *ctx = NULL;
595 	int ret;
596 
597 	bzero(&cmd, sizeof (cmd));
598 	bzero(&resp, sizeof (resp));
599 	cmd.ecd_opcode = ENAHW_CMD_DESTROY_SQ;
600 	cmd_sq->edsq_idx = hw_idx;
601 	ENAHW_CMD_DESTROY_SQ_DIR(cmd_sq, dir);
602 
603 	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
604 		ena_err(ena, "failed to submit Destroy SQ command: %d", ret);
605 		return (ret);
606 	}
607 
608 	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
609 		ena_err(ena, "failed Destroy SQ: %d", ret);
610 		return (ret);
611 	}
612 
613 	return (0);
614 }
615 
/*
 * Set a device feature via the admin queue.
 *
 * The caller pre-populates the feature-specific payload in cmd; this
 * function fills in the common opcode/feature header, submits the
 * command, and waits for completion. feat_ver selects the version of
 * the feature structure in use.
 *
 * Returns ENOTSUP if the device does not advertise the feature,
 * otherwise 0 or an errno from command submission/completion.
 */
int
ena_set_feature(ena_t *ena, enahw_cmd_desc_t *cmd, enahw_resp_desc_t *resp,
    const enahw_feature_id_t feat_id, const uint8_t feat_ver)
{
	enahw_cmd_set_feat_t *cmd_sf = &cmd->ecd_cmd.ecd_set_feat;
	ena_cmd_ctx_t *ctx = NULL;
	int ret = 0;

	if (!ena_is_feat_avail(ena, feat_id)) {
		ena_err(ena, "attempted to set unsupported feature: 0x%x %d"
		    " (0x%x)", feat_id, feat_ver, ena->ena_supported_features);
		return (ENOTSUP);
	}

	cmd->ecd_opcode = ENAHW_CMD_SET_FEATURE;
	cmd_sf->ecsf_comm.efc_id = feat_id;
	cmd_sf->ecsf_comm.efc_version = feat_ver;
	cmd_sf->ecsf_comm.efc_flags = 0;

	if ((ret = ena_admin_submit_cmd(ena, cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Set Feature command: %d", ret);
		return (ret);
	}

	return (ena_admin_poll_for_resp(ena, ctx));
}
642 
/*
 * Query a device feature via the admin queue, placing the device's
 * answer in resp.
 *
 * Returns ENOTSUP if the device does not advertise the feature,
 * otherwise 0 or an errno from command submission/completion.
 */
int
ena_get_feature(ena_t *ena, enahw_resp_desc_t *resp,
    const enahw_feature_id_t feat_id, const uint8_t feat_ver)
{
	enahw_cmd_desc_t cmd;
	enahw_cmd_get_feat_t *cmd_gf = &cmd.ecd_cmd.ecd_get_feat;
	ena_cmd_ctx_t *ctx = NULL;
	int ret = 0;

	if (!ena_is_feat_avail(ena, feat_id)) {
		return (ENOTSUP);
	}

	bzero(&cmd, sizeof (cmd));
	cmd.ecd_opcode = ENAHW_CMD_GET_FEATURE;
	cmd_gf->ecgf_comm.efc_id = feat_id;
	cmd_gf->ecgf_comm.efc_version = feat_ver;
	/* Request the feature's current value. */
	ENAHW_GET_FEAT_FLAGS_GET_CURR_VAL(cmd_gf);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Get Feature command: %d", ret);
		return (ret);
	}

	return (ena_admin_poll_for_resp(ena, ctx));
}
669 
670 int
671 ena_admin_get_basic_stats(ena_t *ena, enahw_resp_desc_t *resp)
672 {
673 	int ret = 0;
674 	enahw_cmd_desc_t cmd;
675 	enahw_cmd_get_stats_t *cmd_stats = &cmd.ecd_cmd.ecd_get_stats;
676 	ena_cmd_ctx_t *ctx = NULL;
677 
678 	bzero(&cmd, sizeof (cmd));
679 	bzero(resp, sizeof (*resp));
680 	cmd.ecd_opcode = ENAHW_CMD_GET_STATS;
681 	cmd_stats->ecgs_type = ENAHW_GET_STATS_TYPE_BASIC;
682 	cmd_stats->ecgs_scope = ENAHW_GET_STATS_SCOPE_ETH;
683 	cmd_stats->ecgs_device_id = ENAHW_CMD_GET_STATS_MY_DEVICE_ID;
684 
685 	if ((ret = ena_admin_submit_cmd(ena, &cmd, resp, &ctx)) != 0) {
686 		ena_err(ena, "failed to submit Get Basic Stats command: %d",
687 		    ret);
688 		return (ret);
689 	}
690 
691 	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
692 		ena_err(ena, "failed to Get Basic Stats: %d", ret);
693 		return (ret);
694 	}
695 
696 	return (0);
697 }
698 
/*
 * Fetch the device's ENI statistics group into resp via the Get
 * Statistics admin command.
 */
int
ena_admin_get_eni_stats(ena_t *ena, enahw_resp_desc_t *resp)
{
	int ret = 0;
	enahw_cmd_desc_t cmd;
	enahw_cmd_get_stats_t *cmd_stats = &cmd.ecd_cmd.ecd_get_stats;
	ena_cmd_ctx_t *ctx = NULL;

	bzero(&cmd, sizeof (cmd));
	bzero(resp, sizeof (*resp));
	cmd.ecd_opcode = ENAHW_CMD_GET_STATS;
	cmd_stats->ecgs_type = ENAHW_GET_STATS_TYPE_ENI;
	cmd_stats->ecgs_scope = ENAHW_GET_STATS_SCOPE_ETH;
	cmd_stats->ecgs_device_id = ENAHW_CMD_GET_STATS_MY_DEVICE_ID;

	if ((ret = ena_admin_submit_cmd(ena, &cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Get ENI Stats command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Get ENI Stats: %d", ret);
		return (ret);
	}

	return (0);
}
726