1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of copyright holder nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "ena_com.h" 35 36 /*****************************************************************************/ 37 /*****************************************************************************/ 38 39 /* Timeout in micro-sec */ 40 #define ADMIN_CMD_TIMEOUT_US (3000000) 41 42 #define ENA_ASYNC_QUEUE_DEPTH 16 43 #define ENA_ADMIN_QUEUE_DEPTH 32 44 45 #ifdef ENA_EXTENDED_STATS 46 47 #define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08 48 #define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF) 49 #define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16) 50 51 #endif /* ENA_EXTENDED_STATS */ 52 53 #define ENA_CTRL_MAJOR 0 54 #define ENA_CTRL_MINOR 0 55 #define ENA_CTRL_SUB_MINOR 1 56 57 #define MIN_ENA_CTRL_VER \ 58 (((ENA_CTRL_MAJOR) << \ 59 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ 60 ((ENA_CTRL_MINOR) << \ 61 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ 62 (ENA_CTRL_SUB_MINOR)) 63 64 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) 65 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) 66 67 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF 68 69 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 70 71 #define ENA_REGS_ADMIN_INTR_MASK 1 72 73 #define ENA_POLL_MS 5 74 75 /*****************************************************************************/ 76 /*****************************************************************************/ 77 /*****************************************************************************/ 78 79 enum ena_cmd_status { 80 ENA_CMD_SUBMITTED, 81 ENA_CMD_COMPLETED, 82 /* Abort - canceled by the driver */ 83 ENA_CMD_ABORTED, 84 }; 85 86 struct ena_comp_ctx { 87 ena_wait_event_t wait_event; 88 struct ena_admin_acq_entry *user_cqe; 89 u32 comp_size; 90 enum ena_cmd_status status; 91 /* status from the device */ 92 u8 comp_status; 93 u8 cmd_opcode; 94 bool occupied; 95 }; 96 97 struct 
ena_com_stats_ctx { 98 struct ena_admin_aq_get_stats_cmd get_cmd; 99 struct ena_admin_acq_get_stats_resp get_resp; 100 }; 101 102 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, 103 struct ena_common_mem_addr *ena_addr, 104 dma_addr_t addr) 105 { 106 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { 107 ena_trc_err("dma address has more bits that the device supports\n"); 108 return ENA_COM_INVAL; 109 } 110 111 ena_addr->mem_addr_low = lower_32_bits(addr); 112 ena_addr->mem_addr_high = (u16)upper_32_bits(addr); 113 114 return 0; 115 } 116 117 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue) 118 { 119 struct ena_com_admin_sq *sq = &queue->sq; 120 u16 size = ADMIN_SQ_SIZE(queue->q_depth); 121 122 ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr, 123 sq->mem_handle); 124 125 if (!sq->entries) { 126 ena_trc_err("memory allocation failed\n"); 127 return ENA_COM_NO_MEM; 128 } 129 130 sq->head = 0; 131 sq->tail = 0; 132 sq->phase = 1; 133 134 sq->db_addr = NULL; 135 136 return 0; 137 } 138 139 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) 140 { 141 struct ena_com_admin_cq *cq = &queue->cq; 142 u16 size = ADMIN_CQ_SIZE(queue->q_depth); 143 144 ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr, 145 cq->mem_handle); 146 147 if (!cq->entries) { 148 ena_trc_err("memory allocation failed\n"); 149 return ENA_COM_NO_MEM; 150 } 151 152 cq->head = 0; 153 cq->phase = 1; 154 155 return 0; 156 } 157 158 static int ena_com_admin_init_aenq(struct ena_com_dev *dev, 159 struct ena_aenq_handlers *aenq_handlers) 160 { 161 struct ena_com_aenq *aenq = &dev->aenq; 162 u32 addr_low, addr_high, aenq_caps; 163 u16 size; 164 165 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 166 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 167 ENA_MEM_ALLOC_COHERENT(dev->dmadev, size, 168 aenq->entries, 169 aenq->dma_addr, 170 aenq->mem_handle); 171 172 if (!aenq->entries) { 173 ena_trc_err("memory allocation failed\n"); 174 return ENA_COM_NO_MEM; 175 } 176 177 aenq->head = aenq->q_depth; 178 aenq->phase = 1; 179 180 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); 181 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); 182 183 ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); 184 ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); 185 186 aenq_caps = 0; 187 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; 188 aenq_caps |= (sizeof(struct ena_admin_aenq_entry) << 189 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & 190 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; 191 ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); 192 193 if (unlikely(!aenq_handlers)) { 194 ena_trc_err("aenq handlers pointer is NULL\n"); 195 return ENA_COM_INVAL; 196 } 197 198 aenq->aenq_handlers = aenq_handlers; 199 200 return 0; 201 } 202 203 static inline void comp_ctxt_release(struct ena_com_admin_queue *queue, 204 struct ena_comp_ctx *comp_ctx) 205 { 206 comp_ctx->occupied = false; 207 ATOMIC32_DEC(&queue->outstanding_cmds); 208 } 209 210 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, 211 u16 command_id, bool capture) 212 { 213 if (unlikely(command_id >= queue->q_depth)) { 214 ena_trc_err("command id is larger than the queue size. 
cmd_id: %u queue size %d\n", 215 command_id, queue->q_depth); 216 return NULL; 217 } 218 219 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { 220 ena_trc_err("Completion context is occupied\n"); 221 return NULL; 222 } 223 224 if (capture) { 225 ATOMIC32_INC(&queue->outstanding_cmds); 226 queue->comp_ctx[command_id].occupied = true; 227 } 228 229 return &queue->comp_ctx[command_id]; 230 } 231 232 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 233 struct ena_admin_aq_entry *cmd, 234 size_t cmd_size_in_bytes, 235 struct ena_admin_acq_entry *comp, 236 size_t comp_size_in_bytes) 237 { 238 struct ena_comp_ctx *comp_ctx; 239 u16 tail_masked, cmd_id; 240 u16 queue_size_mask; 241 u16 cnt; 242 243 queue_size_mask = admin_queue->q_depth - 1; 244 245 tail_masked = admin_queue->sq.tail & queue_size_mask; 246 247 /* In case of queue FULL */ 248 cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds); 249 if (cnt >= admin_queue->q_depth) { 250 ena_trc_dbg("admin queue is full.\n"); 251 admin_queue->stats.out_of_space++; 252 return ERR_PTR(ENA_COM_NO_SPACE); 253 } 254 255 cmd_id = admin_queue->curr_cmd_id; 256 257 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & 258 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; 259 260 cmd->aq_common_descriptor.command_id |= cmd_id & 261 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; 262 263 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); 264 if (unlikely(!comp_ctx)) 265 return ERR_PTR(ENA_COM_INVAL); 266 267 comp_ctx->status = ENA_CMD_SUBMITTED; 268 comp_ctx->comp_size = (u32)comp_size_in_bytes; 269 comp_ctx->user_cqe = comp; 270 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; 271 272 ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event); 273 274 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); 275 276 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & 277 queue_size_mask; 278 279 admin_queue->sq.tail++; 280 admin_queue->stats.submitted_cmd++; 281 282 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) 283 admin_queue->sq.phase = !admin_queue->sq.phase; 284 285 ENA_DB_SYNC(&admin_queue->sq.mem_handle); 286 ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail, 287 admin_queue->sq.db_addr); 288 289 return comp_ctx; 290 } 291 292 static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) 293 { 294 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); 295 struct ena_comp_ctx *comp_ctx; 296 u16 i; 297 298 queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size); 299 if (unlikely(!queue->comp_ctx)) { 300 ena_trc_err("memory allocation failed\n"); 301 return ENA_COM_NO_MEM; 302 } 303 304 for (i = 0; i < queue->q_depth; i++) { 305 comp_ctx = get_comp_ctxt(queue, i, false); 306 if (comp_ctx) 307 ENA_WAIT_EVENT_INIT(comp_ctx->wait_event); 308 } 309 310 return 0; 311 } 312 313 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 314 struct ena_admin_aq_entry *cmd, 315 size_t cmd_size_in_bytes, 316 struct ena_admin_acq_entry *comp, 317 size_t comp_size_in_bytes) 318 { 319 unsigned long flags = 0; 320 struct ena_comp_ctx *comp_ctx; 321 322 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 323 if (unlikely(!admin_queue->running_state)) { 324 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 325 return ERR_PTR(ENA_COM_NO_DEVICE); 326 } 327 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, 328 cmd_size_in_bytes, 329 comp, 330 comp_size_in_bytes); 331 if (IS_ERR(comp_ctx)) 332 admin_queue->running_state = false; 333 
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 334 335 return comp_ctx; 336 } 337 338 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, 339 struct ena_com_create_io_ctx *ctx, 340 struct ena_com_io_sq *io_sq) 341 { 342 size_t size; 343 int dev_node = 0; 344 345 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); 346 347 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; 348 io_sq->desc_entry_size = 349 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 350 sizeof(struct ena_eth_io_tx_desc) : 351 sizeof(struct ena_eth_io_rx_desc); 352 353 size = io_sq->desc_entry_size * io_sq->q_depth; 354 io_sq->bus = ena_dev->bus; 355 356 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 357 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, 358 size, 359 io_sq->desc_addr.virt_addr, 360 io_sq->desc_addr.phys_addr, 361 io_sq->desc_addr.mem_handle, 362 ctx->numa_node, 363 dev_node); 364 if (!io_sq->desc_addr.virt_addr) { 365 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 366 size, 367 io_sq->desc_addr.virt_addr, 368 io_sq->desc_addr.phys_addr, 369 io_sq->desc_addr.mem_handle); 370 } 371 372 if (!io_sq->desc_addr.virt_addr) { 373 ena_trc_err("memory allocation failed\n"); 374 return ENA_COM_NO_MEM; 375 } 376 } 377 378 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 379 /* Allocate bounce buffers */ 380 io_sq->bounce_buf_ctrl.buffer_size = 381 ena_dev->llq_info.desc_list_entry_size; 382 io_sq->bounce_buf_ctrl.buffers_num = 383 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; 384 io_sq->bounce_buf_ctrl.next_to_use = 0; 385 386 size = io_sq->bounce_buf_ctrl.buffer_size * 387 io_sq->bounce_buf_ctrl.buffers_num; 388 389 ENA_MEM_ALLOC_NODE(ena_dev->dmadev, 390 size, 391 io_sq->bounce_buf_ctrl.base_buffer, 392 ctx->numa_node, 393 dev_node); 394 if (!io_sq->bounce_buf_ctrl.base_buffer) 395 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size); 396 397 if (!io_sq->bounce_buf_ctrl.base_buffer) { 398 ena_trc_err("bounce buffer memory allocation failed\n"); 399 return ENA_COM_NO_MEM; 400 } 401 402 memcpy(&io_sq->llq_info, &ena_dev->llq_info, 403 sizeof(io_sq->llq_info)); 404 405 /* Initiate the first bounce buffer */ 406 io_sq->llq_buf_ctrl.curr_bounce_buf = 407 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); 408 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, 409 0x0, io_sq->llq_info.desc_list_entry_size); 410 io_sq->llq_buf_ctrl.descs_left_in_line = 411 io_sq->llq_info.descs_num_before_header; 412 413 if (io_sq->llq_info.max_entries_in_tx_burst > 0) 414 io_sq->entries_in_tx_burst_left = 415 io_sq->llq_info.max_entries_in_tx_burst; 416 } 417 418 io_sq->tail = 0; 419 io_sq->next_to_comp = 0; 420 io_sq->phase = 1; 421 422 return 0; 423 } 424 425 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, 426 struct ena_com_create_io_ctx *ctx, 427 struct ena_com_io_cq *io_cq) 428 { 429 size_t size; 430 int prev_node = 0; 431 432 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); 433 434 /* Use the basic completion descriptor for Rx */ 435 io_cq->cdesc_entry_size_in_bytes = 436 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
437 sizeof(struct ena_eth_io_tx_cdesc) : 438 sizeof(struct ena_eth_io_rx_cdesc_base); 439 440 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; 441 io_cq->bus = ena_dev->bus; 442 443 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, 444 size, 445 io_cq->cdesc_addr.virt_addr, 446 io_cq->cdesc_addr.phys_addr, 447 io_cq->cdesc_addr.mem_handle, 448 ctx->numa_node, 449 prev_node); 450 if (!io_cq->cdesc_addr.virt_addr) { 451 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 452 size, 453 io_cq->cdesc_addr.virt_addr, 454 io_cq->cdesc_addr.phys_addr, 455 io_cq->cdesc_addr.mem_handle); 456 } 457 458 if (!io_cq->cdesc_addr.virt_addr) { 459 ena_trc_err("memory allocation failed\n"); 460 return ENA_COM_NO_MEM; 461 } 462 463 io_cq->phase = 1; 464 io_cq->head = 0; 465 466 return 0; 467 } 468 469 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, 470 struct ena_admin_acq_entry *cqe) 471 { 472 struct ena_comp_ctx *comp_ctx; 473 u16 cmd_id; 474 475 cmd_id = cqe->acq_common_descriptor.command & 476 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; 477 478 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); 479 if (unlikely(!comp_ctx)) { 480 ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n"); 481 admin_queue->running_state = false; 482 return; 483 } 484 485 comp_ctx->status = ENA_CMD_COMPLETED; 486 comp_ctx->comp_status = cqe->acq_common_descriptor.status; 487 488 if (comp_ctx->user_cqe) 489 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); 490 491 if (!admin_queue->polling) 492 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); 493 } 494 495 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) 496 { 497 struct ena_admin_acq_entry *cqe = NULL; 498 u16 comp_num = 0; 499 u16 head_masked; 500 u8 phase; 501 502 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); 503 phase = admin_queue->cq.phase; 504 505 cqe = &admin_queue->cq.entries[head_masked]; 506 507 /* Go over all the completions */ 508 while ((READ_ONCE8(cqe->acq_common_descriptor.flags) & 509 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 510 /* Do not read the rest of the completion entry before the 511 * phase bit was validated 512 */ 513 dma_rmb(); 514 ena_com_handle_single_admin_completion(admin_queue, cqe); 515 516 head_masked++; 517 comp_num++; 518 if (unlikely(head_masked == admin_queue->q_depth)) { 519 head_masked = 0; 520 phase = !phase; 521 } 522 523 cqe = &admin_queue->cq.entries[head_masked]; 524 } 525 526 admin_queue->cq.head += comp_num; 527 admin_queue->cq.phase = phase; 528 admin_queue->sq.head += comp_num; 529 admin_queue->stats.completed_cmd += comp_num; 530 } 531 532 static int ena_com_comp_status_to_errno(u8 comp_status) 533 { 534 if (unlikely(comp_status != 0)) 535 ena_trc_err("admin command failed[%u]\n", comp_status); 536 537 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR)) 538 return ENA_COM_INVAL; 539 540 switch (comp_status) { 541 case ENA_ADMIN_SUCCESS: 542 return 0; 543 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: 544 return ENA_COM_NO_MEM; 545 case ENA_ADMIN_UNSUPPORTED_OPCODE: 546 return ENA_COM_UNSUPPORTED; 547 case ENA_ADMIN_BAD_OPCODE: 548 case ENA_ADMIN_MALFORMED_REQUEST: 549 case ENA_ADMIN_ILLEGAL_PARAMETER: 550 case ENA_ADMIN_UNKNOWN_ERROR: 551 return ENA_COM_INVAL; 552 } 553 554 return 0; 555 } 556 557 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, 558 struct ena_com_admin_queue *admin_queue) 559 { 560 unsigned long flags = 0; 561 unsigned long timeout; 562 int ret; 563 564 
timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout); 565 566 while (1) { 567 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 568 ena_com_handle_admin_completion(admin_queue); 569 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 570 571 if (comp_ctx->status != ENA_CMD_SUBMITTED) 572 break; 573 574 if (ENA_TIME_EXPIRE(timeout)) { 575 ena_trc_err("Wait for completion (polling) timeout\n"); 576 /* ENA didn't have any completion */ 577 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 578 admin_queue->stats.no_completion++; 579 admin_queue->running_state = false; 580 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 581 582 ret = ENA_COM_TIMER_EXPIRED; 583 goto err; 584 } 585 586 ENA_MSLEEP(ENA_POLL_MS); 587 } 588 589 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { 590 ena_trc_err("Command was aborted\n"); 591 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 592 admin_queue->stats.aborted_cmd++; 593 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 594 ret = ENA_COM_NO_DEVICE; 595 goto err; 596 } 597 598 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED, 599 "Invalid comp status %d\n", comp_ctx->status); 600 601 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); 602 err: 603 comp_ctxt_release(admin_queue, comp_ctx); 604 return ret; 605 } 606 607 /** 608 * Set the LLQ configurations of the firmware 609 * 610 * The driver provides only the enabled feature values to the device, 611 * which in turn, checks if they are supported. 612 */ 613 static int ena_com_set_llq(struct ena_com_dev *ena_dev) 614 { 615 struct ena_com_admin_queue *admin_queue; 616 struct ena_admin_set_feat_cmd cmd; 617 struct ena_admin_set_feat_resp resp; 618 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 619 int ret; 620 621 memset(&cmd, 0x0, sizeof(cmd)); 622 admin_queue = &ena_dev->admin_queue; 623 624 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 625 cmd.feat_common.feature_id = ENA_ADMIN_LLQ; 626 627 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; 628 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; 629 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; 630 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; 631 632 ret = ena_com_execute_admin_command(admin_queue, 633 (struct ena_admin_aq_entry *)&cmd, 634 sizeof(cmd), 635 (struct ena_admin_acq_entry *)&resp, 636 sizeof(resp)); 637 638 if (unlikely(ret)) 639 ena_trc_err("Failed to set LLQ configurations: %d\n", ret); 640 641 return ret; 642 } 643 644 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, 645 struct ena_admin_feature_llq_desc *llq_features, 646 struct ena_llq_configurations *llq_default_cfg) 647 { 648 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 649 u16 supported_feat; 650 int rc; 651 652 memset(llq_info, 0, sizeof(*llq_info)); 653 654 supported_feat = llq_features->header_location_ctrl_supported; 655 656 if (likely(supported_feat & llq_default_cfg->llq_header_location)) { 657 llq_info->header_location_ctrl = 658 llq_default_cfg->llq_header_location; 659 } else { 660 ena_trc_err("Invalid header location control, supported: 0x%x\n", 661 supported_feat); 662 return -EINVAL; 663 } 664 665 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { 666 supported_feat = llq_features->descriptors_stride_ctrl_supported; 667 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { 668 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; 669 } else { 670 if (supported_feat & 
ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { 671 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 672 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { 673 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; 674 } else { 675 ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n", 676 supported_feat); 677 return -EINVAL; 678 } 679 680 ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", 681 llq_default_cfg->llq_stride_ctrl, 682 supported_feat, 683 llq_info->desc_stride_ctrl); 684 } 685 } else { 686 llq_info->desc_stride_ctrl = 0; 687 } 688 689 supported_feat = llq_features->entry_size_ctrl_supported; 690 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { 691 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; 692 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; 693 } else { 694 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { 695 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 696 llq_info->desc_list_entry_size = 128; 697 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { 698 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; 699 llq_info->desc_list_entry_size = 192; 700 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) { 701 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; 702 llq_info->desc_list_entry_size = 256; 703 } else { 704 ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat); 705 return -EINVAL; 706 } 707 708 ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", 709 llq_default_cfg->llq_ring_entry_size, 710 supported_feat, 711 llq_info->desc_list_entry_size); 712 } 713 if (unlikely(llq_info->desc_list_entry_size & 0x7)) { 714 /* The desc list entry size should be whole multiply of 8 715 * This requirement comes from __iowrite64_copy() 716 */ 717 ena_trc_err("illegal entry size %d\n", 718 llq_info->desc_list_entry_size); 719 return -EINVAL; 720 } 721 722 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) 723 llq_info->descs_per_entry = llq_info->desc_list_entry_size / 724 sizeof(struct ena_eth_io_tx_desc); 725 else 726 llq_info->descs_per_entry = 1; 727 728 supported_feat = llq_features->desc_num_before_header_supported; 729 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { 730 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; 731 } else { 732 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) { 733 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 734 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) { 735 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; 736 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) { 737 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; 738 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { 739 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; 740 } else { 741 ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n", 742 supported_feat); 743 return -EINVAL; 744 } 745 746 ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", 747 
llq_default_cfg->llq_num_decs_before_header, 748 supported_feat, 749 llq_info->descs_num_before_header); 750 } 751 752 llq_info->max_entries_in_tx_burst = 753 (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value); 754 755 rc = ena_com_set_llq(ena_dev); 756 if (rc) 757 ena_trc_err("Cannot set LLQ configuration: %d\n", rc); 758 759 return 0; 760 } 761 762 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, 763 struct ena_com_admin_queue *admin_queue) 764 { 765 unsigned long flags = 0; 766 int ret; 767 768 ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event, 769 admin_queue->completion_timeout); 770 771 /* In case the command wasn't completed find out the root cause. 772 * There might be 2 kinds of errors 773 * 1) No completion (timeout reached) 774 * 2) There is completion but the device didn't get any msi-x interrupt. 775 */ 776 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { 777 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 778 ena_com_handle_admin_completion(admin_queue); 779 admin_queue->stats.no_completion++; 780 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 781 782 if (comp_ctx->status == ENA_CMD_COMPLETED) 783 ena_trc_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n", 784 comp_ctx->cmd_opcode); 785 else 786 ena_trc_err("The ena device doesn't send any completion for the admin cmd %d status %d\n", 787 comp_ctx->cmd_opcode, comp_ctx->status); 788 789 admin_queue->running_state = false; 790 ret = ENA_COM_TIMER_EXPIRED; 791 goto err; 792 } 793 794 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); 795 err: 796 comp_ctxt_release(admin_queue, comp_ctx); 797 return ret; 798 } 799 800 /* This method read the hardware device register through posting writes 801 * and waiting for response 802 * On timeout the function will return ENA_MMIO_READ_TIMEOUT 803 */ 804 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) 805 { 806 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 807 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = 808 mmio_read->read_resp; 809 u32 mmio_read_reg, ret, i; 810 unsigned long flags = 0; 811 u32 timeout = mmio_read->reg_read_to; 812 813 ENA_MIGHT_SLEEP(); 814 815 if (timeout == 0) 816 timeout = ENA_REG_READ_TIMEOUT; 817 818 /* If readless is disabled, perform regular read */ 819 if (!mmio_read->readless_supported) 820 return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset); 821 822 ENA_SPINLOCK_LOCK(mmio_read->lock, flags); 823 mmio_read->seq_num++; 824 825 read_resp->req_id = mmio_read->seq_num + 0xDEAD; 826 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) & 827 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK; 828 mmio_read_reg |= mmio_read->seq_num & 829 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; 830 831 ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, 832 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); 833 834 for (i = 0; i < timeout; i++) { 835 if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num) 836 break; 837 838 ENA_UDELAY(1); 839 } 840 841 if (unlikely(i == timeout)) { 842 ena_trc_err("reading reg failed for timeout. 
expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", 843 mmio_read->seq_num, 844 offset, 845 read_resp->req_id, 846 read_resp->reg_off); 847 ret = ENA_MMIO_READ_TIMEOUT; 848 goto err; 849 } 850 851 if (read_resp->reg_off != offset) { 852 ena_trc_err("Read failure: wrong offset provided\n"); 853 ret = ENA_MMIO_READ_TIMEOUT; 854 } else { 855 ret = read_resp->reg_val; 856 } 857 err: 858 ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags); 859 860 return ret; 861 } 862 863 /* There are two types to wait for completion. 864 * Polling mode - wait until the completion is available. 865 * Async mode - wait on wait queue until the completion is ready 866 * (or the timeout expired). 867 * It is expected that the IRQ called ena_com_handle_admin_completion 868 * to mark the completions. 869 */ 870 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, 871 struct ena_com_admin_queue *admin_queue) 872 { 873 if (admin_queue->polling) 874 return ena_com_wait_and_process_admin_cq_polling(comp_ctx, 875 admin_queue); 876 877 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx, 878 admin_queue); 879 } 880 881 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, 882 struct ena_com_io_sq *io_sq) 883 { 884 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 885 struct ena_admin_aq_destroy_sq_cmd destroy_cmd; 886 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp; 887 u8 direction; 888 int ret; 889 890 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); 891 892 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 893 direction = ENA_ADMIN_SQ_DIRECTION_TX; 894 else 895 direction = ENA_ADMIN_SQ_DIRECTION_RX; 896 897 destroy_cmd.sq.sq_identity |= (direction << 898 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & 899 ENA_ADMIN_SQ_SQ_DIRECTION_MASK; 900 901 destroy_cmd.sq.sq_idx = io_sq->idx; 902 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ; 903 904 ret = ena_com_execute_admin_command(admin_queue, 905 (struct ena_admin_aq_entry *)&destroy_cmd, 906 sizeof(destroy_cmd), 907 (struct ena_admin_acq_entry *)&destroy_resp, 908 sizeof(destroy_resp)); 909 910 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) 911 ena_trc_err("failed to destroy io sq error: %d\n", ret); 912 913 return ret; 914 } 915 916 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, 917 struct ena_com_io_sq *io_sq, 918 struct ena_com_io_cq *io_cq) 919 { 920 size_t size; 921 922 if (io_cq->cdesc_addr.virt_addr) { 923 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; 924 925 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 926 size, 927 io_cq->cdesc_addr.virt_addr, 928 io_cq->cdesc_addr.phys_addr, 929 io_cq->cdesc_addr.mem_handle); 930 931 io_cq->cdesc_addr.virt_addr = NULL; 932 } 933 934 if (io_sq->desc_addr.virt_addr) { 935 size = io_sq->desc_entry_size * io_sq->q_depth; 936 937 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 938 size, 939 io_sq->desc_addr.virt_addr, 940 io_sq->desc_addr.phys_addr, 941 io_sq->desc_addr.mem_handle); 942 943 io_sq->desc_addr.virt_addr = NULL; 944 } 945 946 if (io_sq->bounce_buf_ctrl.base_buffer) { 947 ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer); 948 io_sq->bounce_buf_ctrl.base_buffer = NULL; 949 } 950 } 951 952 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, 953 u16 exp_state) 954 { 955 u32 val, i; 956 957 /* Convert timeout from resolution of 100ms to ENA_POLL_MS */ 958 timeout = (timeout * 100) / ENA_POLL_MS; 959 960 for (i = 0; i < timeout; i++) { 961 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 
962 963 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { 964 ena_trc_err("Reg read timeout occurred\n"); 965 return ENA_COM_TIMER_EXPIRED; 966 } 967 968 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == 969 exp_state) 970 return 0; 971 972 ENA_MSLEEP(ENA_POLL_MS); 973 } 974 975 return ENA_COM_TIMER_EXPIRED; 976 } 977 978 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, 979 enum ena_admin_aq_feature_id feature_id) 980 { 981 u32 feature_mask = 1 << feature_id; 982 983 /* Device attributes is always supported */ 984 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && 985 !(ena_dev->supported_features & feature_mask)) 986 return false; 987 988 return true; 989 } 990 991 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, 992 struct ena_admin_get_feat_resp *get_resp, 993 enum ena_admin_aq_feature_id feature_id, 994 dma_addr_t control_buf_dma_addr, 995 u32 control_buff_size, 996 u8 feature_ver) 997 { 998 struct ena_com_admin_queue *admin_queue; 999 struct ena_admin_get_feat_cmd get_cmd; 1000 int ret; 1001 1002 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { 1003 ena_trc_dbg("Feature %d isn't supported\n", feature_id); 1004 return ENA_COM_UNSUPPORTED; 1005 } 1006 1007 memset(&get_cmd, 0x0, sizeof(get_cmd)); 1008 admin_queue = &ena_dev->admin_queue; 1009 1010 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; 1011 1012 if (control_buff_size) 1013 get_cmd.aq_common_descriptor.flags = 1014 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 1015 else 1016 get_cmd.aq_common_descriptor.flags = 0; 1017 1018 ret = ena_com_mem_addr_set(ena_dev, 1019 &get_cmd.control_buffer.address, 1020 control_buf_dma_addr); 1021 if (unlikely(ret)) { 1022 ena_trc_err("memory address set failed\n"); 1023 return ret; 1024 } 1025 1026 get_cmd.control_buffer.length = control_buff_size; 1027 get_cmd.feat_common.feature_version = feature_ver; 1028 get_cmd.feat_common.feature_id = feature_id; 1029 1030 ret = ena_com_execute_admin_command(admin_queue, 1031 (struct ena_admin_aq_entry *) 1032 &get_cmd, 1033 sizeof(get_cmd), 1034 (struct ena_admin_acq_entry *) 1035 get_resp, 1036 sizeof(*get_resp)); 1037 1038 if (unlikely(ret)) 1039 ena_trc_err("Failed to submit get_feature command %d error: %d\n", 1040 feature_id, ret); 1041 1042 return ret; 1043 } 1044 1045 static int ena_com_get_feature(struct ena_com_dev *ena_dev, 1046 struct ena_admin_get_feat_resp *get_resp, 1047 enum ena_admin_aq_feature_id feature_id, 1048 u8 feature_ver) 1049 { 1050 return ena_com_get_feature_ex(ena_dev, 1051 get_resp, 1052 feature_id, 1053 0, 1054 0, 1055 feature_ver); 1056 } 1057 1058 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) 1059 { 1060 struct ena_rss *rss = &ena_dev->rss; 1061 1062 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 1063 sizeof(*rss->hash_key), 1064 rss->hash_key, 1065 rss->hash_key_dma_addr, 1066 rss->hash_key_mem_handle); 1067 1068 if (unlikely(!rss->hash_key)) 1069 return ENA_COM_NO_MEM; 1070 1071 return 0; 1072 } 1073 1074 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) 1075 { 1076 struct ena_rss *rss = &ena_dev->rss; 1077 1078 if (rss->hash_key) 1079 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1080 sizeof(*rss->hash_key), 1081 rss->hash_key, 1082 rss->hash_key_dma_addr, 1083 rss->hash_key_mem_handle); 1084 rss->hash_key = NULL; 1085 } 1086 1087 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) 1088 { 1089 struct ena_rss *rss = &ena_dev->rss; 1090 1091 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 1092 sizeof(*rss->hash_ctrl), 1093 
rss->hash_ctrl, 1094 rss->hash_ctrl_dma_addr, 1095 rss->hash_ctrl_mem_handle); 1096 1097 if (unlikely(!rss->hash_ctrl)) 1098 return ENA_COM_NO_MEM; 1099 1100 return 0; 1101 } 1102 1103 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) 1104 { 1105 struct ena_rss *rss = &ena_dev->rss; 1106 1107 if (rss->hash_ctrl) 1108 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1109 sizeof(*rss->hash_ctrl), 1110 rss->hash_ctrl, 1111 rss->hash_ctrl_dma_addr, 1112 rss->hash_ctrl_mem_handle); 1113 rss->hash_ctrl = NULL; 1114 } 1115 1116 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, 1117 u16 log_size) 1118 { 1119 struct ena_rss *rss = &ena_dev->rss; 1120 struct ena_admin_get_feat_resp get_resp; 1121 size_t tbl_size; 1122 int ret; 1123 1124 ret = ena_com_get_feature(ena_dev, &get_resp, 1125 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0); 1126 if (unlikely(ret)) 1127 return ret; 1128 1129 if ((get_resp.u.ind_table.min_size > log_size) || 1130 (get_resp.u.ind_table.max_size < log_size)) { 1131 ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n", 1132 1 << log_size, 1133 1 << get_resp.u.ind_table.min_size, 1134 1 << get_resp.u.ind_table.max_size); 1135 return ENA_COM_INVAL; 1136 } 1137 1138 tbl_size = (1ULL << log_size) * 1139 sizeof(struct ena_admin_rss_ind_table_entry); 1140 1141 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 1142 tbl_size, 1143 rss->rss_ind_tbl, 1144 rss->rss_ind_tbl_dma_addr, 1145 rss->rss_ind_tbl_mem_handle); 1146 if (unlikely(!rss->rss_ind_tbl)) 1147 goto mem_err1; 1148 1149 tbl_size = (1ULL << log_size) * sizeof(u16); 1150 rss->host_rss_ind_tbl = 1151 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size); 1152 if (unlikely(!rss->host_rss_ind_tbl)) 1153 goto mem_err2; 1154 1155 rss->tbl_log_size = log_size; 1156 1157 return 0; 1158 1159 mem_err2: 1160 tbl_size = (1ULL << log_size) * 1161 sizeof(struct ena_admin_rss_ind_table_entry); 1162 1163 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1164 tbl_size, 1165 rss->rss_ind_tbl, 1166 rss->rss_ind_tbl_dma_addr, 1167 rss->rss_ind_tbl_mem_handle); 1168 rss->rss_ind_tbl = NULL; 1169 mem_err1: 1170 rss->tbl_log_size = 0; 1171 return ENA_COM_NO_MEM; 1172 } 1173 1174 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) 1175 { 1176 struct ena_rss *rss = &ena_dev->rss; 1177 size_t tbl_size = (1ULL << rss->tbl_log_size) * 1178 sizeof(struct ena_admin_rss_ind_table_entry); 1179 1180 if (rss->rss_ind_tbl) 1181 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1182 tbl_size, 1183 rss->rss_ind_tbl, 1184 rss->rss_ind_tbl_dma_addr, 1185 rss->rss_ind_tbl_mem_handle); 1186 rss->rss_ind_tbl = NULL; 1187 1188 if (rss->host_rss_ind_tbl) 1189 ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl); 1190 rss->host_rss_ind_tbl = NULL; 1191 } 1192 1193 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, 1194 struct ena_com_io_sq *io_sq, u16 cq_idx) 1195 { 1196 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1197 struct ena_admin_aq_create_sq_cmd create_cmd; 1198 struct ena_admin_acq_create_sq_resp_desc cmd_completion; 1199 u8 direction; 1200 int ret; 1201 1202 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1203 1204 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; 1205 1206 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1207 direction = ENA_ADMIN_SQ_DIRECTION_TX; 1208 else 1209 direction = ENA_ADMIN_SQ_DIRECTION_RX; 1210 1211 create_cmd.sq_identity |= (direction << 1212 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & 1213 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; 
1214 1215 create_cmd.sq_caps_2 |= io_sq->mem_queue_type & 1216 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; 1217 1218 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << 1219 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & 1220 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; 1221 1222 create_cmd.sq_caps_3 |= 1223 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; 1224 1225 create_cmd.cq_idx = cq_idx; 1226 create_cmd.sq_depth = io_sq->q_depth; 1227 1228 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 1229 ret = ena_com_mem_addr_set(ena_dev, 1230 &create_cmd.sq_ba, 1231 io_sq->desc_addr.phys_addr); 1232 if (unlikely(ret)) { 1233 ena_trc_err("memory address set failed\n"); 1234 return ret; 1235 } 1236 } 1237 1238 ret = ena_com_execute_admin_command(admin_queue, 1239 (struct ena_admin_aq_entry *)&create_cmd, 1240 sizeof(create_cmd), 1241 (struct ena_admin_acq_entry *)&cmd_completion, 1242 sizeof(cmd_completion)); 1243 if (unlikely(ret)) { 1244 ena_trc_err("Failed to create IO SQ. error: %d\n", ret); 1245 return ret; 1246 } 1247 1248 io_sq->idx = cmd_completion.sq_idx; 1249 1250 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1251 (uintptr_t)cmd_completion.sq_doorbell_offset); 1252 1253 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1254 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar 1255 + cmd_completion.llq_headers_offset); 1256 1257 io_sq->desc_addr.pbuf_dev_addr = 1258 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + 1259 cmd_completion.llq_descriptors_offset); 1260 } 1261 1262 ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); 1263 1264 return ret; 1265 } 1266 1267 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) 1268 { 1269 struct ena_rss *rss = &ena_dev->rss; 1270 struct ena_com_io_sq *io_sq; 1271 u16 qid; 1272 int i; 1273 1274 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1275 qid = rss->host_rss_ind_tbl[i]; 1276 if (qid >= ENA_TOTAL_NUM_QUEUES) 1277 return ENA_COM_INVAL; 1278 1279 io_sq = &ena_dev->io_sq_queues[qid]; 1280 1281 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) 1282 return ENA_COM_INVAL; 1283 1284 rss->rss_ind_tbl[i].cq_idx = io_sq->idx; 1285 } 1286 1287 return 0; 1288 } 1289 1290 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) 1291 { 1292 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 }; 1293 struct ena_rss *rss = &ena_dev->rss; 1294 u8 idx; 1295 u16 i; 1296 1297 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) 1298 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; 1299 1300 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1301 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES) 1302 return ENA_COM_INVAL; 1303 idx = (u8)rss->rss_ind_tbl[i].cq_idx; 1304 1305 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) 1306 return ENA_COM_INVAL; 1307 1308 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx]; 1309 } 1310 1311 return 0; 1312 } 1313 1314 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) 1315 { 1316 size_t size; 1317 1318 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS; 1319 1320 ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size); 1321 if (!ena_dev->intr_moder_tbl) 1322 return ENA_COM_NO_MEM; 1323 1324 ena_com_config_default_interrupt_moderation_table(ena_dev); 1325 1326 return 0; 1327 } 1328 1329 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, 1330 u16 intr_delay_resolution) 1331 { 1332 struct 
ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 1333 unsigned int i; 1334 1335 if (!intr_delay_resolution) { 1336 ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n"); 1337 intr_delay_resolution = 1; 1338 } 1339 ena_dev->intr_delay_resolution = intr_delay_resolution; 1340 1341 /* update Rx */ 1342 for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++) 1343 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution; 1344 1345 /* update Tx */ 1346 ena_dev->intr_moder_tx_interval /= intr_delay_resolution; 1347 } 1348 1349 /*****************************************************************************/ 1350 /******************************* API ******************************/ 1351 /*****************************************************************************/ 1352 1353 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, 1354 struct ena_admin_aq_entry *cmd, 1355 size_t cmd_size, 1356 struct ena_admin_acq_entry *comp, 1357 size_t comp_size) 1358 { 1359 struct ena_comp_ctx *comp_ctx; 1360 int ret; 1361 1362 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, 1363 comp, comp_size); 1364 if (IS_ERR(comp_ctx)) { 1365 if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE)) 1366 ena_trc_dbg("Failed to submit command [%ld]\n", 1367 PTR_ERR(comp_ctx)); 1368 else 1369 ena_trc_err("Failed to submit command [%ld]\n", 1370 PTR_ERR(comp_ctx)); 1371 1372 return PTR_ERR(comp_ctx); 1373 } 1374 1375 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); 1376 if (unlikely(ret)) { 1377 if (admin_queue->running_state) 1378 ena_trc_err("Failed to process command. ret = %d\n", 1379 ret); 1380 else 1381 ena_trc_dbg("Failed to process command. ret = %d\n", 1382 ret); 1383 } 1384 return ret; 1385 } 1386 1387 int ena_com_create_io_cq(struct ena_com_dev *ena_dev, 1388 struct ena_com_io_cq *io_cq) 1389 { 1390 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1391 struct ena_admin_aq_create_cq_cmd create_cmd; 1392 struct ena_admin_acq_create_cq_resp_desc cmd_completion; 1393 int ret; 1394 1395 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1396 1397 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; 1398 1399 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & 1400 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; 1401 create_cmd.cq_caps_1 |= 1402 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; 1403 1404 create_cmd.msix_vector = io_cq->msix_vector; 1405 create_cmd.cq_depth = io_cq->q_depth; 1406 1407 ret = ena_com_mem_addr_set(ena_dev, 1408 &create_cmd.cq_ba, 1409 io_cq->cdesc_addr.phys_addr); 1410 if (unlikely(ret)) { 1411 ena_trc_err("memory address set failed\n"); 1412 return ret; 1413 } 1414 1415 ret = ena_com_execute_admin_command(admin_queue, 1416 (struct ena_admin_aq_entry *)&create_cmd, 1417 sizeof(create_cmd), 1418 (struct ena_admin_acq_entry *)&cmd_completion, 1419 sizeof(cmd_completion)); 1420 if (unlikely(ret)) { 1421 ena_trc_err("Failed to create IO CQ. 
error: %d\n", ret); 1422 return ret; 1423 } 1424 1425 io_cq->idx = cmd_completion.cq_idx; 1426 1427 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1428 cmd_completion.cq_interrupt_unmask_register_offset); 1429 1430 if (cmd_completion.cq_head_db_register_offset) 1431 io_cq->cq_head_db_reg = 1432 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1433 cmd_completion.cq_head_db_register_offset); 1434 1435 if (cmd_completion.numa_node_register_offset) 1436 io_cq->numa_node_cfg_reg = 1437 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1438 cmd_completion.numa_node_register_offset); 1439 1440 ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); 1441 1442 return ret; 1443 } 1444 1445 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 1446 struct ena_com_io_sq **io_sq, 1447 struct ena_com_io_cq **io_cq) 1448 { 1449 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1450 ena_trc_err("Invalid queue number %d but the max is %d\n", 1451 qid, ENA_TOTAL_NUM_QUEUES); 1452 return ENA_COM_INVAL; 1453 } 1454 1455 *io_sq = &ena_dev->io_sq_queues[qid]; 1456 *io_cq = &ena_dev->io_cq_queues[qid]; 1457 1458 return 0; 1459 } 1460 1461 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) 1462 { 1463 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1464 struct ena_comp_ctx *comp_ctx; 1465 u16 i; 1466 1467 if (!admin_queue->comp_ctx) 1468 return; 1469 1470 for (i = 0; i < admin_queue->q_depth; i++) { 1471 comp_ctx = get_comp_ctxt(admin_queue, i, false); 1472 if (unlikely(!comp_ctx)) 1473 break; 1474 1475 comp_ctx->status = ENA_CMD_ABORTED; 1476 1477 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); 1478 } 1479 } 1480 1481 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) 1482 { 1483 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1484 unsigned long flags = 0; 1485 1486 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1487 while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) { 1488 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1489 ENA_MSLEEP(ENA_POLL_MS); 1490 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1491 } 1492 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1493 } 1494 1495 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, 1496 struct ena_com_io_cq *io_cq) 1497 { 1498 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1499 struct ena_admin_aq_destroy_cq_cmd destroy_cmd; 1500 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; 1501 int ret; 1502 1503 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); 1504 1505 destroy_cmd.cq_idx = io_cq->idx; 1506 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; 1507 1508 ret = ena_com_execute_admin_command(admin_queue, 1509 (struct ena_admin_aq_entry *)&destroy_cmd, 1510 sizeof(destroy_cmd), 1511 (struct ena_admin_acq_entry *)&destroy_resp, 1512 sizeof(destroy_resp)); 1513 1514 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) 1515 ena_trc_err("Failed to destroy IO CQ. 
error: %d\n", ret); 1516 1517 return ret; 1518 } 1519 1520 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) 1521 { 1522 return ena_dev->admin_queue.running_state; 1523 } 1524 1525 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) 1526 { 1527 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1528 unsigned long flags = 0; 1529 1530 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1531 ena_dev->admin_queue.running_state = state; 1532 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1533 } 1534 1535 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) 1536 { 1537 u16 depth = ena_dev->aenq.q_depth; 1538 1539 ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); 1540 1541 /* Init head_db to mark that all entries in the queue 1542 * are initially available 1543 */ 1544 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1545 } 1546 1547 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) 1548 { 1549 struct ena_com_admin_queue *admin_queue; 1550 struct ena_admin_set_feat_cmd cmd; 1551 struct ena_admin_set_feat_resp resp; 1552 struct ena_admin_get_feat_resp get_resp; 1553 int ret; 1554 1555 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0); 1556 if (ret) { 1557 ena_trc_info("Can't get aenq configuration\n"); 1558 return ret; 1559 } 1560 1561 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { 1562 ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", 1563 get_resp.u.aenq.supported_groups, 1564 groups_flag); 1565 return ENA_COM_UNSUPPORTED; 1566 } 1567 1568 memset(&cmd, 0x0, sizeof(cmd)); 1569 admin_queue = &ena_dev->admin_queue; 1570 1571 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 1572 cmd.aq_common_descriptor.flags = 0; 1573 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; 1574 cmd.u.aenq.enabled_groups = groups_flag; 1575 1576 ret = ena_com_execute_admin_command(admin_queue, 1577 (struct ena_admin_aq_entry *)&cmd, 1578 sizeof(cmd), 1579 (struct ena_admin_acq_entry *)&resp, 1580 sizeof(resp)); 1581 1582 if (unlikely(ret)) 1583 ena_trc_err("Failed to config AENQ ret: %d\n", ret); 1584 1585 return ret; 1586 } 1587 1588 int ena_com_get_dma_width(struct ena_com_dev *ena_dev) 1589 { 1590 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 1591 int width; 1592 1593 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { 1594 ena_trc_err("Reg read timeout occurred\n"); 1595 return ENA_COM_TIMER_EXPIRED; 1596 } 1597 1598 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> 1599 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; 1600 1601 ena_trc_dbg("ENA dma width: %d\n", width); 1602 1603 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { 1604 ena_trc_err("DMA width illegal value: %d\n", width); 1605 return ENA_COM_INVAL; 1606 } 1607 1608 ena_dev->dma_addr_bits = width; 1609 1610 return width; 1611 } 1612 1613 int ena_com_validate_version(struct ena_com_dev *ena_dev) 1614 { 1615 u32 ver; 1616 u32 ctrl_ver; 1617 u32 ctrl_ver_masked; 1618 1619 /* Make sure the ENA version and the controller version are at least 1620 * as the driver expects 1621 */ 1622 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); 1623 ctrl_ver = ena_com_reg_bar_read32(ena_dev, 1624 ENA_REGS_CONTROLLER_VERSION_OFF); 1625 1626 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || 1627 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { 1628 ena_trc_err("Reg read timeout occurred\n"); 1629 return ENA_COM_TIMER_EXPIRED; 1630 } 
1631 1632 ena_trc_info("ena device version: %d.%d\n", 1633 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> 1634 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, 1635 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); 1636 1637 ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n", 1638 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) 1639 >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, 1640 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) 1641 >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, 1642 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), 1643 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> 1644 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); 1645 1646 ctrl_ver_masked = 1647 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | 1648 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | 1649 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); 1650 1651 /* Validate the ctrl version without the implementation ID */ 1652 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { 1653 ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); 1654 return -1; 1655 } 1656 1657 return 0; 1658 } 1659 1660 void ena_com_admin_destroy(struct ena_com_dev *ena_dev) 1661 { 1662 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1663 struct ena_com_admin_cq *cq = &admin_queue->cq; 1664 struct ena_com_admin_sq *sq = &admin_queue->sq; 1665 struct ena_com_aenq *aenq = &ena_dev->aenq; 1666 u16 size; 1667 1668 ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event); 1669 if (admin_queue->comp_ctx) 1670 ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx); 1671 admin_queue->comp_ctx = NULL; 1672 size = ADMIN_SQ_SIZE(admin_queue->q_depth); 1673 if (sq->entries) 1674 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries, 1675 sq->dma_addr, sq->mem_handle); 1676 sq->entries = NULL; 1677 1678 size = ADMIN_CQ_SIZE(admin_queue->q_depth); 1679 if (cq->entries) 1680 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries, 1681 cq->dma_addr, cq->mem_handle); 1682 cq->entries = NULL; 1683 1684 size = ADMIN_AENQ_SIZE(aenq->q_depth); 1685 if (ena_dev->aenq.entries) 1686 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries, 1687 aenq->dma_addr, aenq->mem_handle); 1688 aenq->entries = NULL; 1689 ENA_SPINLOCK_DESTROY(admin_queue->q_lock); 1690 } 1691 1692 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) 1693 { 1694 u32 mask_value = 0; 1695 1696 if (polling) 1697 mask_value = ENA_REGS_ADMIN_INTR_MASK; 1698 1699 ENA_REG_WRITE32(ena_dev->bus, mask_value, 1700 ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); 1701 ena_dev->admin_queue.polling = polling; 1702 } 1703 1704 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) 1705 { 1706 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1707 1708 ENA_SPINLOCK_INIT(mmio_read->lock); 1709 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 1710 sizeof(*mmio_read->read_resp), 1711 mmio_read->read_resp, 1712 mmio_read->read_resp_dma_addr, 1713 mmio_read->read_resp_mem_handle); 1714 if (unlikely(!mmio_read->read_resp)) 1715 goto err; 1716 1717 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 1718 1719 mmio_read->read_resp->req_id = 0x0; 1720 mmio_read->seq_num = 0x0; 1721 mmio_read->readless_supported = true; 1722 1723 return 0; 1724 1725 err: 1726 ENA_SPINLOCK_DESTROY(mmio_read->lock); 1727 return ENA_COM_NO_MEM; 1728 } 1729 1730 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) 1731 
{ 1732 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1733 1734 mmio_read->readless_supported = readless_supported; 1735 } 1736 1737 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) 1738 { 1739 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1740 1741 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1742 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1743 1744 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1745 sizeof(*mmio_read->read_resp), 1746 mmio_read->read_resp, 1747 mmio_read->read_resp_dma_addr, 1748 mmio_read->read_resp_mem_handle); 1749 1750 mmio_read->read_resp = NULL; 1751 ENA_SPINLOCK_DESTROY(mmio_read->lock); 1752 } 1753 1754 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) 1755 { 1756 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1757 u32 addr_low, addr_high; 1758 1759 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); 1760 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); 1761 1762 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1763 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1764 } 1765 1766 int ena_com_admin_init(struct ena_com_dev *ena_dev, 1767 struct ena_aenq_handlers *aenq_handlers) 1768 { 1769 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1770 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; 1771 int ret; 1772 1773 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 1774 1775 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { 1776 ena_trc_err("Reg read timeout occurred\n"); 1777 return ENA_COM_TIMER_EXPIRED; 1778 } 1779 1780 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { 1781 ena_trc_err("Device isn't ready, abort com init\n"); 1782 return ENA_COM_NO_DEVICE; 1783 } 1784 1785 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; 1786 1787 admin_queue->bus = ena_dev->bus; 1788 admin_queue->q_dmadev = ena_dev->dmadev; 1789 admin_queue->polling = false; 1790 admin_queue->curr_cmd_id = 0; 1791 1792 ATOMIC32_SET(&admin_queue->outstanding_cmds, 0); 1793 1794 ENA_SPINLOCK_INIT(admin_queue->q_lock); 1795 1796 ret = ena_com_init_comp_ctxt(admin_queue); 1797 if (ret) 1798 goto error; 1799 1800 ret = ena_com_admin_init_sq(admin_queue); 1801 if (ret) 1802 goto error; 1803 1804 ret = ena_com_admin_init_cq(admin_queue); 1805 if (ret) 1806 goto error; 1807 1808 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1809 ENA_REGS_AQ_DB_OFF); 1810 1811 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); 1812 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); 1813 1814 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); 1815 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); 1816 1817 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); 1818 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); 1819 1820 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); 1821 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); 1822 1823 aq_caps = 0; 1824 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; 1825 aq_caps |= (sizeof(struct ena_admin_aq_entry) << 1826 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & 1827 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; 1828 1829 acq_caps = 0; 1830 acq_caps |= 
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

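/* Usage sketch (not part of the original file): a hypothetical driver would
 * fill a struct ena_com_create_io_ctx per ring before calling
 * ena_com_create_io_queue() above. The field names are the ones that
 * function consumes; the qid/queue_size values are illustrative only.
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid = 0,
 *		.direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type = ena_dev->tx_mem_queue_type,
 *		.msix_vector = 0,
 *		.queue_size = 1024,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (rc)
 *		return rc;
 */
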
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}

int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp resp;
	struct ena_extra_properties_strings *extra_properties_strings =
		&ena_dev->extra_properties_strings;
	u32 rc;

	extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
		ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       extra_properties_strings->size,
			       extra_properties_strings->virt_addr,
			       extra_properties_strings->dma_addr,
			       extra_properties_strings->dma_handle);
	if (unlikely(!extra_properties_strings->virt_addr)) {
		ena_trc_err("Failed to allocate extra properties strings\n");
		return 0;
	}

	rc = ena_com_get_feature_ex(ena_dev, &resp,
				    ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
				    extra_properties_strings->dma_addr,
				    extra_properties_strings->size, 0);
	if (rc) {
		ena_trc_dbg("Failed to get extra properties strings\n");
		goto err;
	}

	return resp.u.extra_properties_strings.count;
err:
	ena_com_delete_extra_properties_strings(ena_dev);
	return 0;
}

void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
{
	struct ena_extra_properties_strings *extra_properties_strings =
		&ena_dev->extra_properties_strings;

	if (extra_properties_strings->virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      extra_properties_strings->size,
				      extra_properties_strings->virt_addr,
				      extra_properties_strings->dma_addr,
				      extra_properties_strings->dma_handle);
		extra_properties_strings->virt_addr = NULL;
	}
}

int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
				       struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp,
				   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
}

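/* Informational note (not from the original file): ena_com_get_dev_attr_feat()
 * below first queries ENA_ADMIN_DEVICE_ATTRIBUTES and caches the
 * supported_features bitmap; later queries are gated on that bitmap, e.g.
 * the extended max-queues capability check:
 *
 *	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT))
 *		// ask for ENA_ADMIN_MAX_QUEUES_EXT
 *	else
 *		// fall back to ENA_ADMIN_MAX_QUEUES_NUM
 *
 * Optional features (HW hints, LLQ, RSS indirection table) are treated as
 * absent when the device answers ENA_COM_UNSUPPORTED.
 */
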
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return -EINVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		if (rc)
			return rc;

		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints is not a mandatory admin command, so in case the
	 * command isn't supported set driver hints to 0.
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
	if (!rc)
		memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
		       sizeof(get_resp.u.ind_table));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->ind_table, 0x0,
		       sizeof(get_feat_ctx->ind_table));
	else
		return rc;

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

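/* Registration sketch (not from the original file): the handlers table that
 * ena_com_get_specific_aenq_cb() consults is supplied by the caller of
 * ena_com_admin_init(). A hypothetical driver-side definition could look
 * like:
 *
 *	static void my_link_change_cb(void *data, struct ena_admin_aenq_entry *e) { ... }
 *	static void my_unimplemented_cb(void *data, struct ena_admin_aenq_entry *e) { ... }
 *
 *	static struct ena_aenq_handlers aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_cb,
 *		},
 *		.unimplemented_handler = my_unimplemented_cb,
 *	};
 *
 * Groups without an entry fall back to .unimplemented_handler.
 */
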
/* ena_com_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	unsigned long long timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE8(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (unsigned long long)aenq_common->timestamp_low |
			((unsigned long long)aenq_common->timestamp_high << 32);
		ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
				dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
	mmiowb();
}
#ifdef ENA_EXTENDED_STATS
/*
 * Sets the function Idx and Queue Idx to be used for
 * get full statistics feature
 */
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
					  u32 func_queue)
{
	/* Function & Queue is acquired from user in the following format:
	 * Bottom Half word: funct
	 * Top Half Word: queue
	 */
	ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
	ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);

	return 0;
}

#endif /* ENA_EXTENDED_STATS */

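/* Informational note (not from the original file): ena_com_dev_reset() below
 * drives the reset handshake through two registers. The reset reason is
 * packed into ENA_REGS_DEV_CTL_OFF together with the DEV_RESET bit, and
 * completion is detected by polling the RESET_IN_PROGRESS bit of
 * ENA_REGS_DEV_STS_OFF: first waiting for it to assert, then clearing
 * DEV_CTL and waiting for it to deassert. The poll timeout comes from the
 * CAPS register, and the admin command timeout read afterwards is expressed
 * in 100 ms units, hence the "* 100000" conversion to microseconds.
 */
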
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
			(struct ena_admin_aq_entry *)get_cmd,
			sizeof(*get_cmd),
			(struct ena_admin_acq_entry *)get_resp,
			sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

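/* Usage sketch (not from the original file): a hypothetical stats path in
 * the driver would issue the admin command and read the returned counters:
 *
 *	struct ena_admin_basic_stats stats;
 *
 *	rc = ena_com_get_dev_basic_stats(ena_dev, &stats);
 *	if (rc)
 *		return rc;
 *	// e.g. combine stats.rx_bytes_low / stats.rx_bytes_high into a u64
 *
 * The counter field names above come from struct ena_admin_basic_stats and
 * are listed here for illustration only.
 */
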
error: %d\n", mtu, ret); 2368 2369 return ret; 2370 } 2371 2372 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, 2373 struct ena_admin_feature_offload_desc *offload) 2374 { 2375 int ret; 2376 struct ena_admin_get_feat_resp resp; 2377 2378 ret = ena_com_get_feature(ena_dev, &resp, 2379 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); 2380 if (unlikely(ret)) { 2381 ena_trc_err("Failed to get offload capabilities %d\n", ret); 2382 return ret; 2383 } 2384 2385 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); 2386 2387 return 0; 2388 } 2389 2390 int ena_com_set_hash_function(struct ena_com_dev *ena_dev) 2391 { 2392 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2393 struct ena_rss *rss = &ena_dev->rss; 2394 struct ena_admin_set_feat_cmd cmd; 2395 struct ena_admin_set_feat_resp resp; 2396 struct ena_admin_get_feat_resp get_resp; 2397 int ret; 2398 2399 if (!ena_com_check_supported_feature_id(ena_dev, 2400 ENA_ADMIN_RSS_HASH_FUNCTION)) { 2401 ena_trc_dbg("Feature %d isn't supported\n", 2402 ENA_ADMIN_RSS_HASH_FUNCTION); 2403 return ENA_COM_UNSUPPORTED; 2404 } 2405 2406 /* Validate hash function is supported */ 2407 ret = ena_com_get_feature(ena_dev, &get_resp, 2408 ENA_ADMIN_RSS_HASH_FUNCTION, 0); 2409 if (unlikely(ret)) 2410 return ret; 2411 2412 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { 2413 ena_trc_err("Func hash %d isn't supported by device, abort\n", 2414 rss->hash_func); 2415 return ENA_COM_UNSUPPORTED; 2416 } 2417 2418 memset(&cmd, 0x0, sizeof(cmd)); 2419 2420 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2421 cmd.aq_common_descriptor.flags = 2422 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2423 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; 2424 cmd.u.flow_hash_func.init_val = rss->hash_init_val; 2425 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; 2426 2427 ret = ena_com_mem_addr_set(ena_dev, 2428 &cmd.control_buffer.address, 2429 rss->hash_key_dma_addr); 2430 if (unlikely(ret)) { 2431 ena_trc_err("memory address set failed\n"); 2432 return ret; 2433 } 2434 2435 cmd.control_buffer.length = sizeof(*rss->hash_key); 2436 2437 ret = ena_com_execute_admin_command(admin_queue, 2438 (struct ena_admin_aq_entry *)&cmd, 2439 sizeof(cmd), 2440 (struct ena_admin_acq_entry *)&resp, 2441 sizeof(resp)); 2442 if (unlikely(ret)) { 2443 ena_trc_err("Failed to set hash function %d. 
error: %d\n", 2444 rss->hash_func, ret); 2445 return ENA_COM_INVAL; 2446 } 2447 2448 return 0; 2449 } 2450 2451 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, 2452 enum ena_admin_hash_functions func, 2453 const u8 *key, u16 key_len, u32 init_val) 2454 { 2455 struct ena_rss *rss = &ena_dev->rss; 2456 struct ena_admin_get_feat_resp get_resp; 2457 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2458 rss->hash_key; 2459 int rc; 2460 2461 /* Make sure size is a mult of DWs */ 2462 if (unlikely(key_len & 0x3)) 2463 return ENA_COM_INVAL; 2464 2465 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2466 ENA_ADMIN_RSS_HASH_FUNCTION, 2467 rss->hash_key_dma_addr, 2468 sizeof(*rss->hash_key), 0); 2469 if (unlikely(rc)) 2470 return rc; 2471 2472 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { 2473 ena_trc_err("Flow hash function %d isn't supported\n", func); 2474 return ENA_COM_UNSUPPORTED; 2475 } 2476 2477 switch (func) { 2478 case ENA_ADMIN_TOEPLITZ: 2479 if (key_len > sizeof(hash_key->key)) { 2480 ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n", 2481 key_len, sizeof(hash_key->key)); 2482 return ENA_COM_INVAL; 2483 } 2484 2485 memcpy(hash_key->key, key, key_len); 2486 rss->hash_init_val = init_val; 2487 hash_key->keys_num = key_len >> 2; 2488 break; 2489 case ENA_ADMIN_CRC32: 2490 rss->hash_init_val = init_val; 2491 break; 2492 default: 2493 ena_trc_err("Invalid hash function (%d)\n", func); 2494 return ENA_COM_INVAL; 2495 } 2496 2497 rss->hash_func = func; 2498 rc = ena_com_set_hash_function(ena_dev); 2499 2500 /* Restore the old function */ 2501 if (unlikely(rc)) 2502 ena_com_get_hash_function(ena_dev, NULL, NULL); 2503 2504 return rc; 2505 } 2506 2507 int ena_com_get_hash_function(struct ena_com_dev *ena_dev, 2508 enum ena_admin_hash_functions *func, 2509 u8 *key) 2510 { 2511 struct ena_rss *rss = &ena_dev->rss; 2512 struct ena_admin_get_feat_resp get_resp; 2513 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2514 rss->hash_key; 2515 int rc; 2516 2517 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2518 ENA_ADMIN_RSS_HASH_FUNCTION, 2519 rss->hash_key_dma_addr, 2520 sizeof(*rss->hash_key), 0); 2521 if (unlikely(rc)) 2522 return rc; 2523 2524 rss->hash_func = get_resp.u.flow_hash_func.selected_func; 2525 if (func) 2526 *func = rss->hash_func; 2527 2528 if (key) 2529 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); 2530 2531 return 0; 2532 } 2533 2534 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, 2535 enum ena_admin_flow_hash_proto proto, 2536 u16 *fields) 2537 { 2538 struct ena_rss *rss = &ena_dev->rss; 2539 struct ena_admin_get_feat_resp get_resp; 2540 int rc; 2541 2542 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2543 ENA_ADMIN_RSS_HASH_INPUT, 2544 rss->hash_ctrl_dma_addr, 2545 sizeof(*rss->hash_ctrl), 0); 2546 if (unlikely(rc)) 2547 return rc; 2548 2549 if (fields) 2550 *fields = rss->hash_ctrl->selected_fields[proto].fields; 2551 2552 return 0; 2553 } 2554 2555 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) 2556 { 2557 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2558 struct ena_rss *rss = &ena_dev->rss; 2559 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; 2560 struct ena_admin_set_feat_cmd cmd; 2561 struct ena_admin_set_feat_resp resp; 2562 int ret; 2563 2564 if (!ena_com_check_supported_feature_id(ena_dev, 2565 ENA_ADMIN_RSS_HASH_INPUT)) { 2566 ena_trc_dbg("Feature %d isn't supported\n", 2567 ENA_ADMIN_RSS_HASH_INPUT); 2568 return 
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
			(struct ena_admin_aq_entry *)&cmd,
			sizeof(cmd),
			(struct ena_admin_acq_entry *)&resp,
			sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

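/* Informational note (not from the original file): the default hash input
 * programmed by ena_com_set_default_hash_ctrl() below is a 4-tuple
 * (L3 source/destination plus L4 source/destination port) for TCP/UDP over
 * IPv4 and IPv6, a 2-tuple (L3 source/destination) for plain and fragmented
 * IP, and the L2 source/destination for non-IP traffic. Each selection is
 * checked against the fields the device reports as supported before it is
 * pushed via ena_com_set_hash_ctrl().
 */
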
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
			hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

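/* Usage sketch (not from the original file): the host copy of the RSS
 * indirection table is filled entry by entry and then pushed to the device
 * in one admin command by ena_com_indirect_table_set() below. A hypothetical
 * round-robin spread over num_rx_queues queues:
 *
 *	u32 tbl_size = 1 << ENA_DEFAULT_RSS_TABLE_LOG_SIZE;  // hypothetical size
 *
 *	for (i = 0; i < tbl_size; i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_rx_queues);
 *		if (rc)
 *			return rc;
 *	}
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */
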
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
			(struct ena_admin_aq_entry *)&cmd,
			sizeof(cmd),
			(struct ena_admin_acq_entry *)&resp,
			sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:

	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

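/* Informational note (not from the original file): the host attribute
 * helpers below hand two host-memory regions to the device. The host info
 * page is a fixed SZ_4K allocation that carries, among other things, the
 * packed ENA spec version, while the debug area size is chosen by the
 * caller. Both base addresses and the debug area size are pushed to the
 * device in a single ENA_ADMIN_HOST_ATTR_CONFIG set-feature command by
 * ena_com_set_host_attributes().
 */
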
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
			(struct ena_admin_aq_entry *)&cmd,
			sizeof(cmd),
			(struct ena_admin_acq_entry *)&resp,
			sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

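/* Informational note (not from the original file): moderation intervals are
 * cached in units of the device's intr_delay_resolution, so the setters
 * above divide the requested microseconds by the resolution and the getters
 * further below multiply it back. For example, with a (hypothetical)
 * resolution of 2 us, a request for 64 us is stored as 32 device units.
 */
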
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg("Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (intr_moder_tbl)
		return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;

	return 0;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	int rc;
	int size;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	/* Validate the descriptor is not too big */
	size = ena_dev->tx_max_header_size;
	size += ena_dev->llq_info.descs_num_before_header *
		sizeof(struct ena_eth_io_tx_desc);

	if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
		ena_trc_err("the size of the LLQ entry is smaller than needed\n");
		return ENA_COM_INVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}

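/* Usage sketch (not from the original file): ena_com_config_dev_mode() is
 * typically fed the LLQ descriptor obtained during feature discovery; when
 * the device reports no LLQ support (max_llq_num == 0) the placement policy
 * silently stays ENA_ADMIN_PLACEMENT_POLICY_HOST. The llq_cfg values below
 * are illustrative defaults, not taken from this file:
 *
 *	struct ena_llq_configurations llq_cfg = {
 *		.llq_header_location = ENA_ADMIN_INLINE_HEADER,
 *		.llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY,
 *		.llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B,
 *		.llq_ring_entry_size_value = 128,
 *		.llq_num_decs_before_header =
 *			ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2,
 *	};
 *
 *	rc = ena_com_config_dev_mode(ena_dev, &get_feat_ctx.llq, &llq_cfg);
 */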