/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2021 Oxide Computer Company
 */

/*
 * This file contains everything having to do with the admin queue,
 * which is used to send commands to the device and read back its
 * responses.
 */

#include "ena_hw.h"
#include "ena.h"

/*
 * Mark the context as complete (a response has been received).
 */
static void
ena_complete_cmd_ctx(ena_cmd_ctx_t *ctx, enahw_resp_desc_t *hwresp)
{
	bcopy(hwresp, ctx->ectx_resp, sizeof (*hwresp));
	ctx->ectx_pending = B_FALSE;
}

/*
 * Reset and release the context back to the free list.
 */
static void
ena_release_cmd_ctx(ena_t *ena, ena_cmd_ctx_t *ctx)
{
	ASSERT(ctx->ectx_pending == B_FALSE);
	ctx->ectx_resp = NULL;
	ctx->ectx_cmd_opcode = ENAHW_CMD_NONE;

	mutex_enter(&ena->ena_aq.ea_sq_lock);
	list_insert_head(&ena->ena_aq.ea_cmd_ctxs_free, ctx);
	ena->ena_aq.ea_pending_cmds--;
	mutex_exit(&ena->ena_aq.ea_sq_lock);
}

/*
 * Acquire the next available command context.
 */
static ena_cmd_ctx_t *
ena_acquire_cmd_ctx(ena_adminq_t *aq)
{
	VERIFY(MUTEX_HELD(&aq->ea_sq_lock));
	ASSERT3U(aq->ea_pending_cmds, <, aq->ea_qlen);
	ena_cmd_ctx_t *ctx = list_remove_head(&aq->ea_cmd_ctxs_free);

	ctx->ectx_pending = B_TRUE;
	return (ctx);
}

/*
 * Submit a command to the admin queue.
 */
int
ena_admin_submit_cmd(ena_t *ena, enahw_cmd_desc_t *cmd, enahw_resp_desc_t *resp,
    ena_cmd_ctx_t **ctx)
{
	VERIFY3U(cmd->ecd_opcode, !=, 0);
	ena_adminq_t *aq = &ena->ena_aq;
	ena_admin_sq_t *sq = &aq->ea_sq;
	uint16_t modulo_mask = aq->ea_qlen - 1;
	ena_cmd_ctx_t *lctx = NULL;

	mutex_enter(&aq->ea_sq_lock);
	uint16_t tail_mod = sq->eas_tail & modulo_mask;

	if (aq->ea_pending_cmds >= aq->ea_qlen) {
		mutex_enter(&aq->ea_stat_lock);
		aq->ea_stats.queue_full++;
		mutex_exit(&aq->ea_stat_lock);
		mutex_exit(&aq->ea_sq_lock);
		return (ENOSPC);
	}

	lctx = ena_acquire_cmd_ctx(aq);
	lctx->ectx_cmd_opcode = cmd->ecd_opcode;
	lctx->ectx_resp = resp;

	cmd->ecd_flags = sq->eas_phase & ENAHW_CMD_PHASE_MASK;
	ENAHW_CMD_ID(cmd, lctx->ectx_id);
	bcopy(cmd, &sq->eas_entries[tail_mod], sizeof (*cmd));
	ENA_DMA_SYNC(sq->eas_dma, DDI_DMA_SYNC_FORDEV);
	sq->eas_tail++;
	aq->ea_pending_cmds++;

	mutex_enter(&aq->ea_stat_lock);
	aq->ea_stats.cmds_submitted++;
	mutex_exit(&aq->ea_stat_lock);

	DTRACE_PROBE4(cmd__submit, enahw_cmd_desc_t *, cmd, ena_cmd_ctx_t *,
	    lctx, uint16_t, tail_mod, uint8_t, sq->eas_phase);

	if ((sq->eas_tail & modulo_mask) == 0) {
		sq->eas_phase = !sq->eas_phase;
	}

	ena_hw_abs_write32(ena, sq->eas_dbaddr, sq->eas_tail);
	mutex_exit(&aq->ea_sq_lock);
	*ctx = lctx;
	return (0);
}

/*
 * Read a single response from the admin queue.
 */
static void
ena_admin_read_resp(ena_t *ena, enahw_resp_desc_t *hwresp)
{
	ena_adminq_t *aq = &ena->ena_aq;
	ena_admin_cq_t *cq = &aq->ea_cq;
	ena_cmd_ctx_t *ctx = NULL;
	uint16_t modulo_mask = aq->ea_qlen - 1;
	VERIFY(MUTEX_HELD(&aq->ea_cq_lock));

	uint16_t head_mod = cq->eac_head & modulo_mask;
	uint8_t phase = cq->eac_phase & ENAHW_RESP_PHASE_MASK;
	uint16_t cmd_id = ENAHW_RESP_CMD_ID(hwresp);
	ctx = &aq->ea_cmd_ctxs[cmd_id];
	ASSERT3U(ctx->ectx_id, ==, cmd_id);
	ena_complete_cmd_ctx(ctx, hwresp);

	if (hwresp->erd_status != ENAHW_RESP_SUCCESS) {
		mutex_enter(&aq->ea_stat_lock);
		aq->ea_stats.cmds_fail++;
		mutex_exit(&aq->ea_stat_lock);
		DTRACE_PROBE4(cmd__fail, enahw_resp_desc_t *, hwresp,
		    ena_cmd_ctx_t *, ctx, uint16_t, head_mod, uint8_t, phase);
		return;
	}

	DTRACE_PROBE4(cmd__success, enahw_resp_desc_t *, hwresp,
	    ena_cmd_ctx_t *, ctx, uint16_t, head_mod, uint8_t, phase);
	mutex_enter(&aq->ea_stat_lock);
	aq->ea_stats.cmds_success++;
	mutex_exit(&aq->ea_stat_lock);
}

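/*
 * Process any responses currently sitting on the admin completion
 * queue. An entry belongs to the device's current pass over the ring
 * when its phase bit matches ours; the phase flips each time the head
 * index wraps around the ring.
 */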
static void
ena_admin_process_responses(ena_t *ena)
{
	ena_adminq_t *aq = &ena->ena_aq;
	ena_admin_cq_t *cq = &aq->ea_cq;
	uint16_t modulo_mask = aq->ea_qlen - 1;
	enahw_resp_desc_t *hwresp;

	mutex_enter(&aq->ea_cq_lock);
	uint16_t head_mod = cq->eac_head & modulo_mask;
	uint8_t phase = cq->eac_phase & ENAHW_RESP_PHASE_MASK;

	ENA_DMA_SYNC(cq->eac_dma, DDI_DMA_SYNC_FORKERNEL);
	hwresp = &cq->eac_entries[head_mod];
	while ((hwresp->erd_flags & ENAHW_RESP_PHASE_MASK) == phase) {
		ena_admin_read_resp(ena, hwresp);

		cq->eac_head++;
		head_mod = cq->eac_head & modulo_mask;

		if (head_mod == 0) {
			phase = !phase;
		}

		hwresp = &cq->eac_entries[head_mod];
	}

	cq->eac_phase = phase;
	mutex_exit(&aq->ea_cq_lock);
}

/*
 * Wait for the command described by ctx to complete by polling for
 * status updates.
 */
int
ena_admin_poll_for_resp(ena_t *ena, ena_cmd_ctx_t *ctx)
{
	int ret = 0;
	hrtime_t expire = gethrtime() + ena->ena_aq.ea_cmd_timeout_ns;

	while (1) {
		ena_admin_process_responses(ena);

		if (!ctx->ectx_pending) {
			break;
		}

		/* Wait for 1 millisecond. */
		delay(drv_usectohz(1000));

		if (gethrtime() > expire) {
			/*
			 * We have no visibility into the device to
			 * confirm it is making progress on this
			 * command. At this point the driver and
			 * device cannot agree on the state of the
			 * world: perhaps the device is still making
			 * progress but not fast enough, perhaps the
			 * device completed the command but there was
			 * a failure to deliver the reply, perhaps the
			 * command failed but once again the reply was
			 * not delivered. With this unknown state the
			 * best thing to do is to reset the device and
			 * start from scratch. But as we don't have
			 * that capability at the moment the next best
			 * thing to do is to spin or panic; we choose
			 * to panic.
			 */
			panic("timed out waiting for admin response");
		}
	}

	ret = enahw_resp_status_to_errno(ena, ctx->ectx_resp->erd_status);
	ena_release_cmd_ctx(ena, ctx);
	return (ret);
}

/*
 * Free the DMA memory backing the host info area.
 */
void
ena_free_host_info(ena_t *ena)
{
	ena_dma_free(&ena->ena_host_info);
}

/*
 * Allocate the host info area and use it to tell the device about
 * this host, this driver, and the features the driver supports.
 */
boolean_t
ena_init_host_info(ena_t *ena)
{
	enahw_host_info_t *ehi;
	int ret = 0;
	int *regs;
	uint_t nregs;
	ena_dma_buf_t *hi_dma;
	enahw_cmd_desc_t cmd;
	enahw_feat_host_attr_t *ha_cmd =
	    &cmd.ecd_cmd.ecd_set_feat.ecsf_feat.ecsf_host_attr;
	enahw_resp_desc_t resp;
	ena_dma_conf_t conf = {
		.edc_size = ENAHW_HOST_INFO_ALLOC_SZ,
		.edc_align = ENAHW_HOST_INFO_ALIGNMENT,
		.edc_sgl = 1,
		.edc_endian = DDI_NEVERSWAP_ACC,
		.edc_stream = B_FALSE,
	};

	hi_dma = &ena->ena_host_info;

	if (!ena_dma_alloc(ena, hi_dma, &conf, 4096)) {
		ena_err(ena, "failed to allocate DMA for host info");
		return (B_FALSE);
	}

	ehi = (void *)hi_dma->edb_va;
	ehi->ehi_ena_spec_version =
	    ((ENA_SPEC_VERSION_MAJOR << ENAHW_HOST_INFO_SPEC_MAJOR_SHIFT) |
	    (ENA_SPEC_VERSION_MINOR));

	ehi->ehi_bdf = 0;
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, ena->ena_dip,
	    DDI_PROP_DONTPASS, "reg", &regs, &nregs) == DDI_PROP_SUCCESS) {
		if (nregs != 0) {
			ehi->ehi_bdf |= PCI_REG_BUS_G(regs[0]) << 8;
			ehi->ehi_bdf |= PCI_REG_DEV_G(regs[0]) << 3;
			ehi->ehi_bdf |= PCI_REG_FUNC_G(regs[0]);
		}

		ddi_prop_free(regs);
	}

	/*
	 * There is no illumos OS type; it would be nice to ping
	 * someone at Amazon and see if we can't get one added.
	 */
	ehi->ehi_os_type = ENAHW_OS_FREEBSD;
	ehi->ehi_kernel_ver = 511; /* If you know you know */
	(void) strlcpy((char *)ehi->ehi_kernel_ver_str, utsname.version,
	    sizeof (ehi->ehi_kernel_ver_str));
	ehi->ehi_os_dist = 0; /* What everyone else does. */
	ehi->ehi_driver_ver =
	    (ENA_MODULE_VER_MAJOR) |
	    (ENA_MODULE_VER_MINOR << ENAHW_HOST_INFO_MINOR_SHIFT) |
	    (ENA_MODULE_VER_SUBMINOR << ENAHW_HOST_INFO_SUB_MINOR_SHIFT);
	ehi->ehi_num_cpus = ncpus_online;

	/*
	 * ENA devices are not created equal. Some will support
	 * features not found in others. This field tells the device
	 * which features the driver supports.
	 *
	 * ENAHW_HOST_INFO_RX_OFFSET
	 *
	 * Some ENA devices will write the frame data at an offset
	 * in the buffer, presumably for alignment purposes. We
	 * support this feature for the sole reason that the Linux
	 * driver does as well.
	 *
	 * ENAHW_HOST_INFO_INTERRUPT_MODERATION
	 *
	 * Based on the Linux history this flag indicates that the
	 * driver "supports interrupt moderation properly". What
	 * that means is anyone's guess. The Linux driver seems to
	 * have some "adaptive" interrupt moderation, so perhaps
	 * it's that? In any case, FreeBSD doesn't bother with
	 * setting this flag, so we'll leave it be for now as well.
	 *
	 * If you're curious to know if the device supports
	 * interrupt moderation: the FEAT_INTERRUPT_MODERATION flag
	 * will be set in ena_hw.eh_supported_features.
	 *
	 * ENAHW_HOST_INFO_RX_BUF_MIRRORING
	 *
	 * Support traffic mirroring by allowing the hypervisor to
	 * read the buffer memory directly. This probably has to do
	 * with AWS flow logs, allowing more efficient mirroring.
	 * But it's hard to say for sure given we only have the
	 * Linux commit log to go off of. In any case, the only
	 * requirement for this feature is that the Rx DMA buffers
	 * be read/write, which they are.
	 *
	 * ENAHW_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY
	 *
	 * The device supports retrieving and updating the RSS
	 * function and hash key. As we don't yet implement RSS,
	 * this is disabled.
	 */
	ehi->ehi_driver_supported_features =
	    ENAHW_HOST_INFO_RX_OFFSET_MASK |
	    ENAHW_HOST_INFO_RX_BUF_MIRRORING_MASK;

	ENA_DMA_SYNC(*hi_dma, DDI_DMA_SYNC_FORDEV);
	bzero(&cmd, sizeof (cmd));
	ena_set_dma_addr(ena, hi_dma->edb_cookie->dmac_laddress,
	    &ha_cmd->efha_os_addr);

	/*
	 * You might notice the "debug area" is not allocated or
	 * configured; that is on purpose.
	 *
	 * The "debug area" is a region of host memory that contains
	 * the String Set (SS) tables used to report statistics to
	 * tools like ethtool (on Linux). This table consists of one
	 * or more entries, each a 32-byte string (the name of the
	 * statistic) along with its associated 64-bit value. The
	 * stats reported here contain both the host-side stats as
	 * well as device-reported stats (ENAHW_GET_STATS_TYPE_ENI). I
	 * believe the reason for calling it the "debug area" is that
	 * it can be accessed from outside of the guest, allowing an
	 * AWS user (?) or Amazon employee to get basic information
	 * about the state of the device from the guest's point of
	 * view.
	 *
	 * In the fullness of time, our driver should probably support
	 * this aspect of ENA. For the time being, all testing
	 * indicates the driver and device function fine without it.
	 */

	ret = ena_set_feature(ena, &cmd, &resp, ENAHW_FEAT_HOST_ATTR_CONFIG,
	    ENAHW_FEAT_HOST_ATTR_CONFIG_VER);
	if (ret != 0) {
		ena_err(ena, "failed to set host attributes: %d", ret);
		ena_dma_free(hi_dma);
		return (B_FALSE);
	}

	return (B_TRUE);
}

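/*
 * Create a completion queue (CQ) of num_descs entries backed by the
 * memory at phys_addr. On success the device-assigned queue index is
 * returned along with the CQ's unmask register address and, when the
 * device provides them, its head doorbell and NUMA node register
 * addresses.
 */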
int
ena_create_cq(ena_t *ena, uint16_t num_descs, uint64_t phys_addr,
    boolean_t is_tx, uint32_t vector, uint16_t *hw_index,
    uint32_t **unmask_addr, uint32_t **headdb, uint32_t **numanode)
{
	int ret;
	enahw_cmd_desc_t cmd;
	enahw_cmd_create_cq_t *cmd_cq = &cmd.ecd_cmd.ecd_create_cq;
	enahw_resp_desc_t resp;
	enahw_resp_create_cq_t *resp_cq = &resp.erd_resp.erd_create_cq;
	ena_cmd_ctx_t *ctx = NULL;
	uint8_t desc_size = is_tx ? sizeof (enahw_tx_cdesc_t) :
	    sizeof (enahw_rx_cdesc_t);

	bzero(&cmd, sizeof (cmd));
	bzero(&resp, sizeof (resp));

	cmd.ecd_opcode = ENAHW_CMD_CREATE_CQ;
	ENAHW_CMD_CREATE_CQ_INTERRUPT_MODE_ENABLE(cmd_cq);
	ASSERT3U(desc_size % 4, ==, 0);
	ENAHW_CMD_CREATE_CQ_DESC_SIZE_WORDS(cmd_cq, desc_size / 4);
	cmd_cq->ecq_num_descs = num_descs;
	cmd_cq->ecq_msix_vector = vector;
	ena_set_dma_addr(ena, phys_addr, &cmd_cq->ecq_addr);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Create CQ command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Create CQ: %d", ret);
		return (ret);
	}

	*hw_index = resp_cq->ercq_idx;
	*unmask_addr = (uint32_t *)(ena->ena_reg_base +
	    resp_cq->ercq_interrupt_mask_reg_offset);

	if (resp_cq->ercq_head_db_reg_offset != 0) {
		*headdb = (uint32_t *)(ena->ena_reg_base +
		    resp_cq->ercq_head_db_reg_offset);
	} else {
		*headdb = NULL;
	}

	if (resp_cq->ercq_numa_node_reg_offset != 0) {
		*numanode = (uint32_t *)(ena->ena_reg_base +
		    resp_cq->ercq_numa_node_reg_offset);
	} else {
		*numanode = NULL;
	}

	return (0);
}

/*
 * Destroy the completion queue with the given device-assigned index.
 */
int
ena_destroy_cq(ena_t *ena, uint16_t hw_idx)
{
	enahw_cmd_desc_t cmd;
	enahw_resp_desc_t resp;
	ena_cmd_ctx_t *ctx = NULL;
	int ret;

	bzero(&cmd, sizeof (cmd));
	bzero(&resp, sizeof (resp));
	cmd.ecd_opcode = ENAHW_CMD_DESTROY_CQ;
	cmd.ecd_cmd.ecd_destroy_cq.edcq_idx = hw_idx;

	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Destroy CQ command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Destroy CQ: %d", ret);
		return (ret);
	}

	return (0);
}

/*
 * Create a submission queue (SQ) of num_descs entries backed by the
 * memory at phys_addr and associated with the completion queue at
 * cq_index. On success the device-assigned queue index and doorbell
 * register address are returned.
 */
int
ena_create_sq(ena_t *ena, uint16_t num_descs, uint64_t phys_addr,
    boolean_t is_tx, uint16_t cq_index, uint16_t *hw_index, uint32_t **db_addr)
{
	int ret;
	enahw_cmd_desc_t cmd;
	enahw_cmd_create_sq_t *cmd_sq = &cmd.ecd_cmd.ecd_create_sq;
	enahw_resp_desc_t resp;
	enahw_resp_create_sq_t *resp_sq = &resp.erd_resp.erd_create_sq;
	enahw_sq_direction_t dir =
	    is_tx ? ENAHW_SQ_DIRECTION_TX : ENAHW_SQ_DIRECTION_RX;
	ena_cmd_ctx_t *ctx = NULL;

	if (!ISP2(num_descs)) {
		ena_err(ena, "the number of descs must be a power of 2, but "
		    "is %d", num_descs);
		return (EINVAL);
	}

	bzero(&cmd, sizeof (cmd));
	bzero(&resp, sizeof (resp));
	cmd.ecd_opcode = ENAHW_CMD_CREATE_SQ;
	ENAHW_CMD_CREATE_SQ_DIR(cmd_sq, dir);
	ENAHW_CMD_CREATE_SQ_PLACEMENT_POLICY(cmd_sq,
	    ENAHW_PLACEMENT_POLICY_HOST);
	ENAHW_CMD_CREATE_SQ_COMPLETION_POLICY(cmd_sq,
	    ENAHW_COMPLETION_POLICY_DESC);
	/*
	 * We limit all SQ descriptor rings to an SGL of 1; therefore,
	 * they are always physically contiguous.
	 */
	ENAHW_CMD_CREATE_SQ_PHYSMEM_CONTIG(cmd_sq);
	cmd_sq->ecsq_cq_idx = cq_index;
	cmd_sq->ecsq_num_descs = num_descs;

	/*
	 * If we ever use a non-host placement policy, then guard this
	 * code against placement type (this value should not be set
	 * for device placement).
	 */
	ena_set_dma_addr(ena, phys_addr, &cmd_sq->ecsq_base);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Create SQ command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Create SQ: %d", ret);
		return (ret);
	}

	*hw_index = resp_sq->ersq_idx;
	*db_addr = (uint32_t *)(ena->ena_reg_base +
	    resp_sq->ersq_db_reg_offset);
	return (0);
}

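/*
 * Destroy the Tx or Rx submission queue with the given device-assigned
 * index.
 */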
int
ena_destroy_sq(ena_t *ena, uint16_t hw_idx, boolean_t is_tx)
{
	enahw_cmd_desc_t cmd;
	enahw_cmd_destroy_sq_t *cmd_sq = &cmd.ecd_cmd.ecd_destroy_sq;
	enahw_resp_desc_t resp;
	ena_cmd_ctx_t *ctx = NULL;
	int ret;

	bzero(&cmd, sizeof (cmd));
	bzero(&resp, sizeof (resp));
	cmd.ecd_opcode = ENAHW_CMD_DESTROY_SQ;
	cmd_sq->edsq_idx = hw_idx;
	ENAHW_CMD_DESTROY_SQ_DIR(cmd_sq, is_tx);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, &resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Destroy SQ command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Destroy SQ: %d", ret);
		return (ret);
	}

	return (0);
}

/*
 * Determine if a given feature is available on this device.
 */
static boolean_t
ena_is_feature_avail(ena_t *ena, const enahw_feature_id_t feat_id)
{
	VERIFY3U(feat_id, <=, ENAHW_FEAT_NUM);
	uint32_t mask = 1U << feat_id;

	/*
	 * The device attributes feature is always supported, as
	 * indicated by the common code.
	 */
	if (feat_id == ENAHW_FEAT_DEVICE_ATTRIBUTES) {
		return (B_TRUE);
	}

	return ((ena->ena_supported_features & mask) != 0);
}

/*
 * Set the feature identified by feat_id using the caller-prepared
 * command and response descriptors.
 */
int
ena_set_feature(ena_t *ena, enahw_cmd_desc_t *cmd, enahw_resp_desc_t *resp,
    const enahw_feature_id_t feat_id, const uint8_t feat_ver)
{
	enahw_cmd_set_feat_t *cmd_sf = &cmd->ecd_cmd.ecd_set_feat;
	ena_cmd_ctx_t *ctx = NULL;
	int ret = 0;

	if (!ena_is_feature_avail(ena, feat_id)) {
		ena_err(ena, "attempted to set unsupported feature: 0x%x %d"
		    " (0x%x)", feat_id, feat_ver, ena->ena_supported_features);
		return (ENOTSUP);
	}

	cmd->ecd_opcode = ENAHW_CMD_SET_FEATURE;
	cmd_sf->ecsf_comm.efc_id = feat_id;
	cmd_sf->ecsf_comm.efc_version = feat_ver;
	cmd_sf->ecsf_comm.efc_flags = 0;

	if ((ret = ena_admin_submit_cmd(ena, cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Set Feature command: %d", ret);
		return (ret);
	}

	return (ena_admin_poll_for_resp(ena, ctx));
}

/*
 * Query the current value of the feature identified by feat_id,
 * placing the device's answer in resp.
 */
int
ena_get_feature(ena_t *ena, enahw_resp_desc_t *resp,
    const enahw_feature_id_t feat_id, const uint8_t feat_ver)
{
	enahw_cmd_desc_t cmd;
	enahw_cmd_get_feat_t *cmd_gf = &cmd.ecd_cmd.ecd_get_feat;
	ena_cmd_ctx_t *ctx = NULL;
	int ret = 0;

	if (!ena_is_feature_avail(ena, feat_id)) {
		return (ENOTSUP);
	}

	bzero(&cmd, sizeof (cmd));
	cmd.ecd_opcode = ENAHW_CMD_GET_FEATURE;
	cmd_gf->ecgf_comm.efc_id = feat_id;
	cmd_gf->ecgf_comm.efc_version = feat_ver;
	ENAHW_GET_FEAT_FLAGS_GET_CURR_VAL(cmd_gf);

	if ((ret = ena_admin_submit_cmd(ena, &cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Get Feature command: %d", ret);
		return (ret);
	}

	return (ena_admin_poll_for_resp(ena, ctx));
}

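/*
 * Retrieve the device's basic (Ethernet) statistics into resp.
 */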
int
ena_admin_get_basic_stats(ena_t *ena, enahw_resp_desc_t *resp)
{
	int ret = 0;
	enahw_cmd_desc_t cmd;
	enahw_cmd_get_stats_t *cmd_stats = &cmd.ecd_cmd.ecd_get_stats;
	ena_cmd_ctx_t *ctx = NULL;

	bzero(&cmd, sizeof (cmd));
	bzero(resp, sizeof (*resp));
	cmd.ecd_opcode = ENAHW_CMD_GET_STATS;
	cmd_stats->ecgs_type = ENAHW_GET_STATS_TYPE_BASIC;
	cmd_stats->ecgs_scope = ENAHW_GET_STATS_SCOPE_ETH;
	cmd_stats->ecgs_device_id = ENAHW_CMD_GET_STATS_MY_DEVICE_ID;

	if ((ret = ena_admin_submit_cmd(ena, &cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Get Basic Stats command: %d",
		    ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Get Basic Stats: %d", ret);
		return (ret);
	}

	return (0);
}

/*
 * Retrieve the device's ENI statistics into resp.
 */
int
ena_admin_get_eni_stats(ena_t *ena, enahw_resp_desc_t *resp)
{
	int ret = 0;
	enahw_cmd_desc_t cmd;
	enahw_cmd_get_stats_t *cmd_stats = &cmd.ecd_cmd.ecd_get_stats;
	ena_cmd_ctx_t *ctx = NULL;

	bzero(&cmd, sizeof (cmd));
	bzero(resp, sizeof (*resp));
	cmd.ecd_opcode = ENAHW_CMD_GET_STATS;
	cmd_stats->ecgs_type = ENAHW_GET_STATS_TYPE_ENI;
	cmd_stats->ecgs_scope = ENAHW_GET_STATS_SCOPE_ETH;
	cmd_stats->ecgs_device_id = ENAHW_CMD_GET_STATS_MY_DEVICE_ID;

	if ((ret = ena_admin_submit_cmd(ena, &cmd, resp, &ctx)) != 0) {
		ena_err(ena, "failed to submit Get ENI Stats command: %d", ret);
		return (ret);
	}

	if ((ret = ena_admin_poll_for_resp(ena, ctx)) != 0) {
		ena_err(ena, "failed to Get ENI Stats: %d", ret);
		return (ret);
	}

	return (0);
}