1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of copyright holder nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "ena_com.h" 35 #ifdef ENA_INTERNAL 36 #include "ena_gen_info.h" 37 #endif 38 39 /*****************************************************************************/ 40 /*****************************************************************************/ 41 42 /* Timeout in micro-sec */ 43 #define ADMIN_CMD_TIMEOUT_US (3000000) 44 45 #define ENA_ASYNC_QUEUE_DEPTH 16 46 #define ENA_ADMIN_QUEUE_DEPTH 32 47 48 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ 49 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ 50 | (ENA_COMMON_SPEC_VERSION_MINOR)) 51 52 #define ENA_CTRL_MAJOR 0 53 #define ENA_CTRL_MINOR 0 54 #define ENA_CTRL_SUB_MINOR 1 55 56 #define MIN_ENA_CTRL_VER \ 57 (((ENA_CTRL_MAJOR) << \ 58 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ 59 ((ENA_CTRL_MINOR) << \ 60 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ 61 (ENA_CTRL_SUB_MINOR)) 62 63 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) 64 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) 65 66 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF 67 68 /*****************************************************************************/ 69 /*****************************************************************************/ 70 /*****************************************************************************/ 71 72 enum ena_cmd_status { 73 ENA_CMD_SUBMITTED, 74 ENA_CMD_COMPLETED, 75 /* Abort - canceled by the driver */ 76 ENA_CMD_ABORTED, 77 }; 78 79 struct ena_comp_ctx { 80 ena_wait_event_t wait_event; 81 struct ena_admin_acq_entry *user_cqe; 82 u32 comp_size; 83 enum ena_cmd_status status; 84 /* status from the device */ 85 u8 comp_status; 86 u8 cmd_opcode; 87 bool occupied; 88 }; 89 90 struct ena_com_stats_ctx { 91 struct ena_admin_aq_get_stats_cmd get_cmd; 92 struct ena_admin_acq_get_stats_resp get_resp; 93 }; 94 95 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, 
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u64)addr >> 32;

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);

	if (!sq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);

	if (!cq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
			       aenq->entries,
			       aenq->dma_addr,
			       aenq->mem_handle);

	if (!aenq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err("aenq handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		ena_trc_err("command id is larger than the queue size.
cmd_id: %u queue size %d\n", 208 command_id, queue->q_depth); 209 return NULL; 210 } 211 212 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { 213 ena_trc_err("Completion context is occupied\n"); 214 return NULL; 215 } 216 217 if (capture) { 218 ATOMIC32_INC(&queue->outstanding_cmds); 219 queue->comp_ctx[command_id].occupied = true; 220 } 221 222 return &queue->comp_ctx[command_id]; 223 } 224 225 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 226 struct ena_admin_aq_entry *cmd, 227 size_t cmd_size_in_bytes, 228 struct ena_admin_acq_entry *comp, 229 size_t comp_size_in_bytes) 230 { 231 struct ena_comp_ctx *comp_ctx; 232 u16 tail_masked, cmd_id; 233 u16 queue_size_mask; 234 u16 cnt; 235 236 queue_size_mask = admin_queue->q_depth - 1; 237 238 tail_masked = admin_queue->sq.tail & queue_size_mask; 239 240 /* In case of queue FULL */ 241 cnt = admin_queue->sq.tail - admin_queue->sq.head; 242 if (cnt >= admin_queue->q_depth) { 243 ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n", 244 admin_queue->sq.tail, 245 admin_queue->sq.head, 246 admin_queue->q_depth); 247 admin_queue->stats.out_of_space++; 248 return ERR_PTR(ENA_COM_NO_SPACE); 249 } 250 251 cmd_id = admin_queue->curr_cmd_id; 252 253 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & 254 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; 255 256 cmd->aq_common_descriptor.command_id |= cmd_id & 257 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; 258 259 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); 260 if (unlikely(!comp_ctx)) 261 return ERR_PTR(ENA_COM_INVAL); 262 263 comp_ctx->status = ENA_CMD_SUBMITTED; 264 comp_ctx->comp_size = (u32)comp_size_in_bytes; 265 comp_ctx->user_cqe = comp; 266 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; 267 268 ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event); 269 270 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); 271 272 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & 273 queue_size_mask; 274 275 admin_queue->sq.tail++; 276 admin_queue->stats.submitted_cmd++; 277 278 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) 279 admin_queue->sq.phase = !admin_queue->sq.phase; 280 281 ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail, 282 admin_queue->sq.db_addr); 283 284 return comp_ctx; 285 } 286 287 static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) 288 { 289 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); 290 struct ena_comp_ctx *comp_ctx; 291 u16 i; 292 293 queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size); 294 if (unlikely(!queue->comp_ctx)) { 295 ena_trc_err("memory allocation failed"); 296 return ENA_COM_NO_MEM; 297 } 298 299 for (i = 0; i < queue->q_depth; i++) { 300 comp_ctx = get_comp_ctxt(queue, i, false); 301 if (comp_ctx) 302 ENA_WAIT_EVENT_INIT(comp_ctx->wait_event); 303 } 304 305 return 0; 306 } 307 308 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 309 struct ena_admin_aq_entry *cmd, 310 size_t cmd_size_in_bytes, 311 struct ena_admin_acq_entry *comp, 312 size_t comp_size_in_bytes) 313 { 314 unsigned long flags; 315 struct ena_comp_ctx *comp_ctx; 316 317 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 318 if (unlikely(!admin_queue->running_state)) { 319 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 320 return ERR_PTR(ENA_COM_NO_DEVICE); 321 } 322 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, 323 cmd_size_in_bytes, 324 comp, 325 comp_size_in_bytes); 326 if 
(unlikely(IS_ERR(comp_ctx))) 327 admin_queue->running_state = false; 328 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 329 330 return comp_ctx; 331 } 332 333 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, 334 struct ena_com_create_io_ctx *ctx, 335 struct ena_com_io_sq *io_sq) 336 { 337 size_t size; 338 int dev_node = 0; 339 340 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); 341 342 io_sq->desc_entry_size = 343 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 344 sizeof(struct ena_eth_io_tx_desc) : 345 sizeof(struct ena_eth_io_rx_desc); 346 347 size = io_sq->desc_entry_size * io_sq->q_depth; 348 io_sq->bus = ena_dev->bus; 349 350 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 351 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, 352 size, 353 io_sq->desc_addr.virt_addr, 354 io_sq->desc_addr.phys_addr, 355 io_sq->desc_addr.mem_handle, 356 ctx->numa_node, 357 dev_node); 358 if (!io_sq->desc_addr.virt_addr) { 359 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 360 size, 361 io_sq->desc_addr.virt_addr, 362 io_sq->desc_addr.phys_addr, 363 io_sq->desc_addr.mem_handle); 364 } 365 } else { 366 ENA_MEM_ALLOC_NODE(ena_dev->dmadev, 367 size, 368 io_sq->desc_addr.virt_addr, 369 ctx->numa_node, 370 dev_node); 371 if (!io_sq->desc_addr.virt_addr) { 372 io_sq->desc_addr.virt_addr = 373 ENA_MEM_ALLOC(ena_dev->dmadev, size); 374 } 375 } 376 377 if (!io_sq->desc_addr.virt_addr) { 378 ena_trc_err("memory allocation failed"); 379 return ENA_COM_NO_MEM; 380 } 381 382 io_sq->tail = 0; 383 io_sq->next_to_comp = 0; 384 io_sq->phase = 1; 385 386 return 0; 387 } 388 389 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, 390 struct ena_com_create_io_ctx *ctx, 391 struct ena_com_io_cq *io_cq) 392 { 393 size_t size; 394 int prev_node = 0; 395 396 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); 397 398 /* Use the basic completion descriptor for Rx */ 399 io_cq->cdesc_entry_size_in_bytes = 400 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 401 sizeof(struct ena_eth_io_tx_cdesc) : 402 sizeof(struct ena_eth_io_rx_cdesc_base); 403 404 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; 405 io_cq->bus = ena_dev->bus; 406 407 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, 408 size, 409 io_cq->cdesc_addr.virt_addr, 410 io_cq->cdesc_addr.phys_addr, 411 io_cq->cdesc_addr.mem_handle, 412 ctx->numa_node, 413 prev_node); 414 if (!io_cq->cdesc_addr.virt_addr) { 415 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 416 size, 417 io_cq->cdesc_addr.virt_addr, 418 io_cq->cdesc_addr.phys_addr, 419 io_cq->cdesc_addr.mem_handle); 420 } 421 422 if (!io_cq->cdesc_addr.virt_addr) { 423 ena_trc_err("memory allocation failed"); 424 return ENA_COM_NO_MEM; 425 } 426 427 io_cq->phase = 1; 428 io_cq->head = 0; 429 430 return 0; 431 } 432 433 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, 434 struct ena_admin_acq_entry *cqe) 435 { 436 struct ena_comp_ctx *comp_ctx; 437 u16 cmd_id; 438 439 cmd_id = cqe->acq_common_descriptor.command & 440 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; 441 442 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); 443 if (unlikely(!comp_ctx)) { 444 ena_trc_err("comp_ctx is NULL. 
Changing the admin queue running state\n"); 445 admin_queue->running_state = false; 446 return; 447 } 448 449 comp_ctx->status = ENA_CMD_COMPLETED; 450 comp_ctx->comp_status = cqe->acq_common_descriptor.status; 451 452 if (comp_ctx->user_cqe) 453 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); 454 455 if (!admin_queue->polling) 456 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); 457 } 458 459 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) 460 { 461 struct ena_admin_acq_entry *cqe = NULL; 462 u16 comp_num = 0; 463 u16 head_masked; 464 u8 phase; 465 466 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); 467 phase = admin_queue->cq.phase; 468 469 cqe = &admin_queue->cq.entries[head_masked]; 470 471 /* Go over all the completions */ 472 while ((cqe->acq_common_descriptor.flags & 473 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 474 /* Do not read the rest of the completion entry before the 475 * phase bit was validated 476 */ 477 rmb(); 478 ena_com_handle_single_admin_completion(admin_queue, cqe); 479 480 head_masked++; 481 comp_num++; 482 if (unlikely(head_masked == admin_queue->q_depth)) { 483 head_masked = 0; 484 phase = !phase; 485 } 486 487 cqe = &admin_queue->cq.entries[head_masked]; 488 } 489 490 admin_queue->cq.head += comp_num; 491 admin_queue->cq.phase = phase; 492 admin_queue->sq.head += comp_num; 493 admin_queue->stats.completed_cmd += comp_num; 494 } 495 496 static int ena_com_comp_status_to_errno(u8 comp_status) 497 { 498 if (unlikely(comp_status != 0)) 499 ena_trc_err("admin command failed[%u]\n", comp_status); 500 501 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR)) 502 return ENA_COM_INVAL; 503 504 switch (comp_status) { 505 case ENA_ADMIN_SUCCESS: 506 return 0; 507 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: 508 return ENA_COM_NO_MEM; 509 case ENA_ADMIN_UNSUPPORTED_OPCODE: 510 return ENA_COM_PERMISSION; 511 case ENA_ADMIN_BAD_OPCODE: 512 case ENA_ADMIN_MALFORMED_REQUEST: 513 case ENA_ADMIN_ILLEGAL_PARAMETER: 514 case ENA_ADMIN_UNKNOWN_ERROR: 515 return ENA_COM_INVAL; 516 } 517 518 return 0; 519 } 520 521 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, 522 struct ena_com_admin_queue *admin_queue) 523 { 524 unsigned long flags, timeout; 525 int ret; 526 527 timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout); 528 529 while (1) { 530 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 531 ena_com_handle_admin_completion(admin_queue); 532 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 533 534 if (comp_ctx->status != ENA_CMD_SUBMITTED) 535 break; 536 537 if (ENA_TIME_EXPIRE(timeout)) { 538 ena_trc_err("Wait for completion (polling) timeout\n"); 539 /* ENA didn't have any completion */ 540 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 541 admin_queue->stats.no_completion++; 542 admin_queue->running_state = false; 543 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 544 545 ret = ENA_COM_TIMER_EXPIRED; 546 goto err; 547 } 548 549 ENA_MSLEEP(100); 550 } 551 552 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { 553 ena_trc_err("Command was aborted\n"); 554 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 555 admin_queue->stats.aborted_cmd++; 556 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 557 ret = ENA_COM_NO_DEVICE; 558 goto err; 559 } 560 561 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED, 562 "Invalid comp status %d\n", comp_ctx->status); 563 564 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); 565 err: 566 comp_ctxt_release(admin_queue, comp_ctx); 
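	/* The completion context is released on every exit path (success,
	 * timeout and abort alike), so this entry can be reused by a
	 * subsequent admin command.
	 */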
	return ret;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							 struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed, find out the root cause.
	 * There might be two kinds of errors:
	 * 1) No completion (timeout reached).
	 * 2) There is a completion but the driver didn't receive the MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			ena_trc_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
				    comp_ctx->cmd_opcode);
		else
			ena_trc_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = ENA_COM_TIMER_EXPIRED;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads a hardware device register by posting a write
 * and waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, timeout, ret;
	unsigned long flags;
	int i;

	ENA_MIGHT_SLEEP();

	timeout = mmio_read->reg_read_to ? : ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * to it
	 */
	wmb();

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err("Read failure: wrong offset provided");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}

/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
				       ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
				      ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
					      size,
					      io_sq->desc_addr.virt_addr,
					      io_sq->desc_addr.phys_addr,
					      io_sq->desc_addr.mem_handle);
		else
			ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		ENA_MSLEEP(100);
	}

	return ENA_COM_TIMER_EXPIRED;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

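/*
 * Usage sketch (illustrative only, not part of the driver flow): a feature
 * whose payload does not fit in the completion entry is fetched through
 * ena_com_get_feature_ex() below by handing the device a DMA-able control
 * buffer. The buffer and its DMA handle in this sketch (feat_buf,
 * feat_buf_dma) are hypothetical names; the RSS helpers later in this file,
 * e.g. ena_com_get_hash_ctrl(), follow exactly this pattern with buffers
 * that were allocated with ENA_MEM_ALLOC_COHERENT().
 *
 *	struct ena_admin_get_feat_resp get_resp;
 *	int rc;
 *
 *	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
 *				    ENA_ADMIN_RSS_HASH_INPUT,
 *				    feat_buf_dma, sizeof(*feat_buf));
 *	if (unlikely(rc))
 *		return rc;
 *
 *	(the device writes the feature payload into feat_buf; get_resp still
 *	carries the common completion descriptor and any small inline fields)
 */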
798 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, 799 struct ena_admin_get_feat_resp *get_resp, 800 enum ena_admin_aq_feature_id feature_id, 801 dma_addr_t control_buf_dma_addr, 802 u32 control_buff_size) 803 { 804 struct ena_com_admin_queue *admin_queue; 805 struct ena_admin_get_feat_cmd get_cmd; 806 int ret; 807 808 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { 809 ena_trc_dbg("Feature %d isn't supported\n", feature_id); 810 return ENA_COM_PERMISSION; 811 } 812 813 memset(&get_cmd, 0x0, sizeof(get_cmd)); 814 admin_queue = &ena_dev->admin_queue; 815 816 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; 817 818 if (control_buff_size) 819 get_cmd.aq_common_descriptor.flags = 820 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 821 else 822 get_cmd.aq_common_descriptor.flags = 0; 823 824 ret = ena_com_mem_addr_set(ena_dev, 825 &get_cmd.control_buffer.address, 826 control_buf_dma_addr); 827 if (unlikely(ret)) { 828 ena_trc_err("memory address set failed\n"); 829 return ret; 830 } 831 832 get_cmd.control_buffer.length = control_buff_size; 833 834 get_cmd.feat_common.feature_id = feature_id; 835 836 ret = ena_com_execute_admin_command(admin_queue, 837 (struct ena_admin_aq_entry *) 838 &get_cmd, 839 sizeof(get_cmd), 840 (struct ena_admin_acq_entry *) 841 get_resp, 842 sizeof(*get_resp)); 843 844 if (unlikely(ret)) 845 ena_trc_err("Failed to submit get_feature command %d error: %d\n", 846 feature_id, ret); 847 848 return ret; 849 } 850 851 static int ena_com_get_feature(struct ena_com_dev *ena_dev, 852 struct ena_admin_get_feat_resp *get_resp, 853 enum ena_admin_aq_feature_id feature_id) 854 { 855 return ena_com_get_feature_ex(ena_dev, 856 get_resp, 857 feature_id, 858 0, 859 0); 860 } 861 862 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) 863 { 864 struct ena_rss *rss = &ena_dev->rss; 865 866 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 867 sizeof(*rss->hash_key), 868 rss->hash_key, 869 rss->hash_key_dma_addr, 870 rss->hash_key_mem_handle); 871 872 if (unlikely(!rss->hash_key)) 873 return ENA_COM_NO_MEM; 874 875 return 0; 876 } 877 878 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) 879 { 880 struct ena_rss *rss = &ena_dev->rss; 881 882 if (rss->hash_key) 883 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 884 sizeof(*rss->hash_key), 885 rss->hash_key, 886 rss->hash_key_dma_addr, 887 rss->hash_key_mem_handle); 888 rss->hash_key = NULL; 889 } 890 891 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) 892 { 893 struct ena_rss *rss = &ena_dev->rss; 894 895 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 896 sizeof(*rss->hash_ctrl), 897 rss->hash_ctrl, 898 rss->hash_ctrl_dma_addr, 899 rss->hash_ctrl_mem_handle); 900 901 if (unlikely(!rss->hash_ctrl)) 902 return ENA_COM_NO_MEM; 903 904 return 0; 905 } 906 907 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) 908 { 909 struct ena_rss *rss = &ena_dev->rss; 910 911 if (rss->hash_ctrl) 912 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 913 sizeof(*rss->hash_ctrl), 914 rss->hash_ctrl, 915 rss->hash_ctrl_dma_addr, 916 rss->hash_ctrl_mem_handle); 917 rss->hash_ctrl = NULL; 918 } 919 920 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, 921 u16 log_size) 922 { 923 struct ena_rss *rss = &ena_dev->rss; 924 struct ena_admin_get_feat_resp get_resp; 925 size_t tbl_size; 926 int ret; 927 928 ret = ena_com_get_feature(ena_dev, &get_resp, 929 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); 930 if (unlikely(ret)) 931 return ret; 932 933 if ((get_resp.u.ind_table.min_size 
> log_size) || 934 (get_resp.u.ind_table.max_size < log_size)) { 935 ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n", 936 1 << log_size, 937 1 << get_resp.u.ind_table.min_size, 938 1 << get_resp.u.ind_table.max_size); 939 return ENA_COM_INVAL; 940 } 941 942 tbl_size = (1ULL << log_size) * 943 sizeof(struct ena_admin_rss_ind_table_entry); 944 945 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 946 tbl_size, 947 rss->rss_ind_tbl, 948 rss->rss_ind_tbl_dma_addr, 949 rss->rss_ind_tbl_mem_handle); 950 if (unlikely(!rss->rss_ind_tbl)) 951 goto mem_err1; 952 953 tbl_size = (1ULL << log_size) * sizeof(u16); 954 rss->host_rss_ind_tbl = 955 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size); 956 if (unlikely(!rss->host_rss_ind_tbl)) 957 goto mem_err2; 958 959 rss->tbl_log_size = log_size; 960 961 return 0; 962 963 mem_err2: 964 tbl_size = (1ULL << log_size) * 965 sizeof(struct ena_admin_rss_ind_table_entry); 966 967 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 968 tbl_size, 969 rss->rss_ind_tbl, 970 rss->rss_ind_tbl_dma_addr, 971 rss->rss_ind_tbl_mem_handle); 972 rss->rss_ind_tbl = NULL; 973 mem_err1: 974 rss->tbl_log_size = 0; 975 return ENA_COM_NO_MEM; 976 } 977 978 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) 979 { 980 struct ena_rss *rss = &ena_dev->rss; 981 size_t tbl_size = (1ULL << rss->tbl_log_size) * 982 sizeof(struct ena_admin_rss_ind_table_entry); 983 984 if (rss->rss_ind_tbl) 985 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 986 tbl_size, 987 rss->rss_ind_tbl, 988 rss->rss_ind_tbl_dma_addr, 989 rss->rss_ind_tbl_mem_handle); 990 rss->rss_ind_tbl = NULL; 991 992 if (rss->host_rss_ind_tbl) 993 ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl); 994 rss->host_rss_ind_tbl = NULL; 995 } 996 997 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, 998 struct ena_com_io_sq *io_sq, u16 cq_idx) 999 { 1000 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1001 struct ena_admin_aq_create_sq_cmd create_cmd; 1002 struct ena_admin_acq_create_sq_resp_desc cmd_completion; 1003 u8 direction; 1004 int ret; 1005 1006 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1007 1008 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; 1009 1010 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1011 direction = ENA_ADMIN_SQ_DIRECTION_TX; 1012 else 1013 direction = ENA_ADMIN_SQ_DIRECTION_RX; 1014 1015 create_cmd.sq_identity |= (direction << 1016 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & 1017 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; 1018 1019 create_cmd.sq_caps_2 |= io_sq->mem_queue_type & 1020 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; 1021 1022 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << 1023 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & 1024 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; 1025 1026 create_cmd.sq_caps_3 |= 1027 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; 1028 1029 create_cmd.cq_idx = cq_idx; 1030 create_cmd.sq_depth = io_sq->q_depth; 1031 1032 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 1033 ret = ena_com_mem_addr_set(ena_dev, 1034 &create_cmd.sq_ba, 1035 io_sq->desc_addr.phys_addr); 1036 if (unlikely(ret)) { 1037 ena_trc_err("memory address set failed\n"); 1038 return ret; 1039 } 1040 } 1041 1042 ret = ena_com_execute_admin_command(admin_queue, 1043 (struct ena_admin_aq_entry *)&create_cmd, 1044 sizeof(create_cmd), 1045 (struct ena_admin_acq_entry *)&cmd_completion, 1046 sizeof(cmd_completion)); 1047 if (unlikely(ret)) { 1048 
ena_trc_err("Failed to create IO SQ. error: %d\n", ret); 1049 return ret; 1050 } 1051 1052 io_sq->idx = cmd_completion.sq_idx; 1053 1054 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1055 (uintptr_t)cmd_completion.sq_doorbell_offset); 1056 1057 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1058 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar 1059 + cmd_completion.llq_headers_offset); 1060 1061 io_sq->desc_addr.pbuf_dev_addr = 1062 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + 1063 cmd_completion.llq_descriptors_offset); 1064 } 1065 1066 ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); 1067 1068 return ret; 1069 } 1070 1071 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) 1072 { 1073 struct ena_rss *rss = &ena_dev->rss; 1074 struct ena_com_io_sq *io_sq; 1075 u16 qid; 1076 int i; 1077 1078 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1079 qid = rss->host_rss_ind_tbl[i]; 1080 if (qid >= ENA_TOTAL_NUM_QUEUES) 1081 return ENA_COM_INVAL; 1082 1083 io_sq = &ena_dev->io_sq_queues[qid]; 1084 1085 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) 1086 return ENA_COM_INVAL; 1087 1088 rss->rss_ind_tbl[i].cq_idx = io_sq->idx; 1089 } 1090 1091 return 0; 1092 } 1093 1094 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) 1095 { 1096 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 }; 1097 struct ena_rss *rss = &ena_dev->rss; 1098 u8 idx; 1099 u16 i; 1100 1101 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) 1102 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; 1103 1104 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1105 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES) 1106 return ENA_COM_INVAL; 1107 idx = (u8)rss->rss_ind_tbl[i].cq_idx; 1108 1109 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) 1110 return ENA_COM_INVAL; 1111 1112 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx]; 1113 } 1114 1115 return 0; 1116 } 1117 1118 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) 1119 { 1120 size_t size; 1121 1122 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS; 1123 1124 ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size); 1125 if (!ena_dev->intr_moder_tbl) 1126 return ENA_COM_NO_MEM; 1127 1128 ena_com_config_default_interrupt_moderation_table(ena_dev); 1129 1130 return 0; 1131 } 1132 1133 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, 1134 u16 intr_delay_resolution) 1135 { 1136 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 1137 unsigned int i; 1138 1139 if (!intr_delay_resolution) { 1140 ena_trc_err("Illegal intr_delay_resolution provided. 
Going to use default 1 usec resolution\n"); 1141 intr_delay_resolution = 1; 1142 } 1143 ena_dev->intr_delay_resolution = intr_delay_resolution; 1144 1145 /* update Rx */ 1146 for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++) 1147 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution; 1148 1149 /* update Tx */ 1150 ena_dev->intr_moder_tx_interval /= intr_delay_resolution; 1151 } 1152 1153 /*****************************************************************************/ 1154 /******************************* API ******************************/ 1155 /*****************************************************************************/ 1156 1157 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, 1158 struct ena_admin_aq_entry *cmd, 1159 size_t cmd_size, 1160 struct ena_admin_acq_entry *comp, 1161 size_t comp_size) 1162 { 1163 struct ena_comp_ctx *comp_ctx; 1164 int ret; 1165 1166 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, 1167 comp, comp_size); 1168 if (unlikely(IS_ERR(comp_ctx))) { 1169 if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE)) 1170 ena_trc_dbg("Failed to submit command [%ld]\n", 1171 PTR_ERR(comp_ctx)); 1172 else 1173 ena_trc_err("Failed to submit command [%ld]\n", 1174 PTR_ERR(comp_ctx)); 1175 1176 return PTR_ERR(comp_ctx); 1177 } 1178 1179 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); 1180 if (unlikely(ret)) { 1181 if (admin_queue->running_state) 1182 ena_trc_err("Failed to process command. ret = %d\n", 1183 ret); 1184 else 1185 ena_trc_dbg("Failed to process command. ret = %d\n", 1186 ret); 1187 } 1188 return ret; 1189 } 1190 1191 int ena_com_create_io_cq(struct ena_com_dev *ena_dev, 1192 struct ena_com_io_cq *io_cq) 1193 { 1194 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1195 struct ena_admin_aq_create_cq_cmd create_cmd; 1196 struct ena_admin_acq_create_cq_resp_desc cmd_completion; 1197 int ret; 1198 1199 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1200 1201 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; 1202 1203 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & 1204 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; 1205 create_cmd.cq_caps_1 |= 1206 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; 1207 1208 create_cmd.msix_vector = io_cq->msix_vector; 1209 create_cmd.cq_depth = io_cq->q_depth; 1210 1211 ret = ena_com_mem_addr_set(ena_dev, 1212 &create_cmd.cq_ba, 1213 io_cq->cdesc_addr.phys_addr); 1214 if (unlikely(ret)) { 1215 ena_trc_err("memory address set failed\n"); 1216 return ret; 1217 } 1218 1219 ret = ena_com_execute_admin_command(admin_queue, 1220 (struct ena_admin_aq_entry *)&create_cmd, 1221 sizeof(create_cmd), 1222 (struct ena_admin_acq_entry *)&cmd_completion, 1223 sizeof(cmd_completion)); 1224 if (unlikely(ret)) { 1225 ena_trc_err("Failed to create IO CQ. 
error: %d\n", ret); 1226 return ret; 1227 } 1228 1229 io_cq->idx = cmd_completion.cq_idx; 1230 1231 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1232 cmd_completion.cq_interrupt_unmask_register_offset); 1233 1234 if (cmd_completion.cq_head_db_register_offset) 1235 io_cq->cq_head_db_reg = 1236 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1237 cmd_completion.cq_head_db_register_offset); 1238 1239 if (cmd_completion.numa_node_register_offset) 1240 io_cq->numa_node_cfg_reg = 1241 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1242 cmd_completion.numa_node_register_offset); 1243 1244 ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); 1245 1246 return ret; 1247 } 1248 1249 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 1250 struct ena_com_io_sq **io_sq, 1251 struct ena_com_io_cq **io_cq) 1252 { 1253 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1254 ena_trc_err("Invalid queue number %d but the max is %d\n", 1255 qid, ENA_TOTAL_NUM_QUEUES); 1256 return ENA_COM_INVAL; 1257 } 1258 1259 *io_sq = &ena_dev->io_sq_queues[qid]; 1260 *io_cq = &ena_dev->io_cq_queues[qid]; 1261 1262 return 0; 1263 } 1264 1265 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) 1266 { 1267 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1268 struct ena_comp_ctx *comp_ctx; 1269 u16 i; 1270 1271 if (!admin_queue->comp_ctx) 1272 return; 1273 1274 for (i = 0; i < admin_queue->q_depth; i++) { 1275 comp_ctx = get_comp_ctxt(admin_queue, i, false); 1276 if (unlikely(!comp_ctx)) 1277 break; 1278 1279 comp_ctx->status = ENA_CMD_ABORTED; 1280 1281 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); 1282 } 1283 } 1284 1285 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) 1286 { 1287 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1288 unsigned long flags; 1289 1290 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1291 while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) { 1292 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1293 ENA_MSLEEP(20); 1294 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1295 } 1296 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1297 } 1298 1299 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, 1300 struct ena_com_io_cq *io_cq) 1301 { 1302 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1303 struct ena_admin_aq_destroy_cq_cmd destroy_cmd; 1304 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; 1305 int ret; 1306 1307 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); 1308 1309 destroy_cmd.cq_idx = io_cq->idx; 1310 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; 1311 1312 ret = ena_com_execute_admin_command(admin_queue, 1313 (struct ena_admin_aq_entry *)&destroy_cmd, 1314 sizeof(destroy_cmd), 1315 (struct ena_admin_acq_entry *)&destroy_resp, 1316 sizeof(destroy_resp)); 1317 1318 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) 1319 ena_trc_err("Failed to destroy IO CQ. 
error: %d\n", ret); 1320 1321 return ret; 1322 } 1323 1324 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) 1325 { 1326 return ena_dev->admin_queue.running_state; 1327 } 1328 1329 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) 1330 { 1331 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1332 unsigned long flags; 1333 1334 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1335 ena_dev->admin_queue.running_state = state; 1336 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1337 } 1338 1339 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) 1340 { 1341 u16 depth = ena_dev->aenq.q_depth; 1342 1343 ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); 1344 1345 /* Init head_db to mark that all entries in the queue 1346 * are initially available 1347 */ 1348 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1349 } 1350 1351 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) 1352 { 1353 struct ena_com_admin_queue *admin_queue; 1354 struct ena_admin_set_feat_cmd cmd; 1355 struct ena_admin_set_feat_resp resp; 1356 struct ena_admin_get_feat_resp get_resp; 1357 int ret; 1358 1359 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG); 1360 if (ret) { 1361 ena_trc_info("Can't get aenq configuration\n"); 1362 return ret; 1363 } 1364 1365 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { 1366 ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", 1367 get_resp.u.aenq.supported_groups, 1368 groups_flag); 1369 return ENA_COM_PERMISSION; 1370 } 1371 1372 memset(&cmd, 0x0, sizeof(cmd)); 1373 admin_queue = &ena_dev->admin_queue; 1374 1375 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 1376 cmd.aq_common_descriptor.flags = 0; 1377 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; 1378 cmd.u.aenq.enabled_groups = groups_flag; 1379 1380 ret = ena_com_execute_admin_command(admin_queue, 1381 (struct ena_admin_aq_entry *)&cmd, 1382 sizeof(cmd), 1383 (struct ena_admin_acq_entry *)&resp, 1384 sizeof(resp)); 1385 1386 if (unlikely(ret)) 1387 ena_trc_err("Failed to config AENQ ret: %d\n", ret); 1388 1389 return ret; 1390 } 1391 1392 int ena_com_get_dma_width(struct ena_com_dev *ena_dev) 1393 { 1394 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 1395 int width; 1396 1397 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { 1398 ena_trc_err("Reg read timeout occurred\n"); 1399 return ENA_COM_TIMER_EXPIRED; 1400 } 1401 1402 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> 1403 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; 1404 1405 ena_trc_dbg("ENA dma width: %d\n", width); 1406 1407 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { 1408 ena_trc_err("DMA width illegal value: %d\n", width); 1409 return ENA_COM_INVAL; 1410 } 1411 1412 ena_dev->dma_addr_bits = width; 1413 1414 return width; 1415 } 1416 1417 int ena_com_validate_version(struct ena_com_dev *ena_dev) 1418 { 1419 u32 ver; 1420 u32 ctrl_ver; 1421 u32 ctrl_ver_masked; 1422 1423 /* Make sure the ENA version and the controller version are at least 1424 * as the driver expects 1425 */ 1426 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); 1427 ctrl_ver = ena_com_reg_bar_read32(ena_dev, 1428 ENA_REGS_CONTROLLER_VERSION_OFF); 1429 1430 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || 1431 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { 1432 ena_trc_err("Reg read timeout occurred\n"); 1433 return ENA_COM_TIMER_EXPIRED; 1434 } 1435 1436 
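	/* Both the VERSION register and MIN_ENA_VER (defined at the top of
	 * this file) pack the major version above the minor version with
	 * ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, which is what makes the plain
	 * integer comparison against MIN_ENA_VER below meaningful.
	 */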
ena_trc_info("ena device version: %d.%d\n", 1437 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> 1438 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, 1439 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); 1440 1441 if (ver < MIN_ENA_VER) { 1442 ena_trc_err("ENA version is lower than the minimal version the driver supports\n"); 1443 return -1; 1444 } 1445 1446 ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n", 1447 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) 1448 >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, 1449 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) 1450 >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, 1451 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), 1452 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> 1453 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); 1454 1455 ctrl_ver_masked = 1456 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | 1457 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | 1458 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); 1459 1460 /* Validate the ctrl version without the implementation ID */ 1461 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { 1462 ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); 1463 return -1; 1464 } 1465 1466 return 0; 1467 } 1468 1469 void ena_com_admin_destroy(struct ena_com_dev *ena_dev) 1470 { 1471 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1472 struct ena_com_admin_cq *cq = &admin_queue->cq; 1473 struct ena_com_admin_sq *sq = &admin_queue->sq; 1474 struct ena_com_aenq *aenq = &ena_dev->aenq; 1475 u16 size; 1476 1477 ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event); 1478 1479 ENA_SPINLOCK_DESTROY(admin_queue->q_lock); 1480 1481 if (admin_queue->comp_ctx) 1482 ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx); 1483 1484 admin_queue->comp_ctx = NULL; 1485 size = ADMIN_SQ_SIZE(admin_queue->q_depth); 1486 if (sq->entries) 1487 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries, 1488 sq->dma_addr, sq->mem_handle); 1489 sq->entries = NULL; 1490 1491 size = ADMIN_CQ_SIZE(admin_queue->q_depth); 1492 if (cq->entries) 1493 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries, 1494 cq->dma_addr, cq->mem_handle); 1495 cq->entries = NULL; 1496 1497 size = ADMIN_AENQ_SIZE(aenq->q_depth); 1498 if (ena_dev->aenq.entries) 1499 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries, 1500 aenq->dma_addr, aenq->mem_handle); 1501 aenq->entries = NULL; 1502 } 1503 1504 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) 1505 { 1506 ena_dev->admin_queue.polling = polling; 1507 } 1508 1509 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) 1510 { 1511 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1512 1513 ENA_SPINLOCK_INIT(mmio_read->lock); 1514 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 1515 sizeof(*mmio_read->read_resp), 1516 mmio_read->read_resp, 1517 mmio_read->read_resp_dma_addr, 1518 mmio_read->read_resp_mem_handle); 1519 if (unlikely(!mmio_read->read_resp)) 1520 return ENA_COM_NO_MEM; 1521 1522 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 1523 1524 mmio_read->read_resp->req_id = 0x0; 1525 mmio_read->seq_num = 0x0; 1526 mmio_read->readless_supported = true; 1527 1528 return 0; 1529 } 1530 1531 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) 1532 { 1533 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1534 1535 mmio_read->readless_supported = 
readless_supported; 1536 } 1537 1538 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) 1539 { 1540 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1541 1542 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1543 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1544 1545 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1546 sizeof(*mmio_read->read_resp), 1547 mmio_read->read_resp, 1548 mmio_read->read_resp_dma_addr, 1549 mmio_read->read_resp_mem_handle); 1550 1551 mmio_read->read_resp = NULL; 1552 1553 ENA_SPINLOCK_DESTROY(mmio_read->lock); 1554 } 1555 1556 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) 1557 { 1558 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1559 u32 addr_low, addr_high; 1560 1561 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); 1562 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); 1563 1564 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1565 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1566 } 1567 1568 int ena_com_admin_init(struct ena_com_dev *ena_dev, 1569 struct ena_aenq_handlers *aenq_handlers, 1570 bool init_spinlock) 1571 { 1572 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1573 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; 1574 int ret; 1575 1576 #ifdef ENA_INTERNAL 1577 ena_trc_info("ena_defs : Version:[%s] Build date [%s]", 1578 ENA_GEN_COMMIT, ENA_GEN_DATE); 1579 #endif 1580 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 1581 1582 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { 1583 ena_trc_err("Reg read timeout occurred\n"); 1584 return ENA_COM_TIMER_EXPIRED; 1585 } 1586 1587 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { 1588 ena_trc_err("Device isn't ready, abort com init\n"); 1589 return ENA_COM_NO_DEVICE; 1590 } 1591 1592 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; 1593 1594 admin_queue->bus = ena_dev->bus; 1595 admin_queue->q_dmadev = ena_dev->dmadev; 1596 admin_queue->polling = false; 1597 admin_queue->curr_cmd_id = 0; 1598 1599 ATOMIC32_SET(&admin_queue->outstanding_cmds, 0); 1600 1601 if (init_spinlock) 1602 ENA_SPINLOCK_INIT(admin_queue->q_lock); 1603 1604 ret = ena_com_init_comp_ctxt(admin_queue); 1605 if (ret) 1606 goto error; 1607 1608 ret = ena_com_admin_init_sq(admin_queue); 1609 if (ret) 1610 goto error; 1611 1612 ret = ena_com_admin_init_cq(admin_queue); 1613 if (ret) 1614 goto error; 1615 1616 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1617 ENA_REGS_AQ_DB_OFF); 1618 1619 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); 1620 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); 1621 1622 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); 1623 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); 1624 1625 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); 1626 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); 1627 1628 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); 1629 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); 1630 1631 aq_caps = 0; 1632 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; 1633 aq_caps |= (sizeof(struct ena_admin_aq_entry) << 1634 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & 1635 
ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; 1636 1637 acq_caps = 0; 1638 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; 1639 acq_caps |= (sizeof(struct ena_admin_acq_entry) << 1640 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & 1641 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; 1642 1643 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); 1644 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); 1645 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); 1646 if (ret) 1647 goto error; 1648 1649 admin_queue->running_state = true; 1650 1651 return 0; 1652 error: 1653 ena_com_admin_destroy(ena_dev); 1654 1655 return ret; 1656 } 1657 1658 int ena_com_create_io_queue(struct ena_com_dev *ena_dev, 1659 struct ena_com_create_io_ctx *ctx) 1660 { 1661 struct ena_com_io_sq *io_sq; 1662 struct ena_com_io_cq *io_cq; 1663 int ret; 1664 1665 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { 1666 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n", 1667 ctx->qid, ENA_TOTAL_NUM_QUEUES); 1668 return ENA_COM_INVAL; 1669 } 1670 1671 io_sq = &ena_dev->io_sq_queues[ctx->qid]; 1672 io_cq = &ena_dev->io_cq_queues[ctx->qid]; 1673 1674 memset(io_sq, 0x0, sizeof(*io_sq)); 1675 memset(io_cq, 0x0, sizeof(*io_cq)); 1676 1677 /* Init CQ */ 1678 io_cq->q_depth = ctx->queue_size; 1679 io_cq->direction = ctx->direction; 1680 io_cq->qid = ctx->qid; 1681 1682 io_cq->msix_vector = ctx->msix_vector; 1683 1684 io_sq->q_depth = ctx->queue_size; 1685 io_sq->direction = ctx->direction; 1686 io_sq->qid = ctx->qid; 1687 1688 io_sq->mem_queue_type = ctx->mem_queue_type; 1689 1690 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1691 /* header length is limited to 8 bits */ 1692 io_sq->tx_max_header_size = 1693 ENA_MIN32(ena_dev->tx_max_header_size, SZ_256); 1694 1695 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); 1696 if (ret) 1697 goto error; 1698 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); 1699 if (ret) 1700 goto error; 1701 1702 ret = ena_com_create_io_cq(ena_dev, io_cq); 1703 if (ret) 1704 goto error; 1705 1706 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); 1707 if (ret) 1708 goto destroy_io_cq; 1709 1710 return 0; 1711 1712 destroy_io_cq: 1713 ena_com_destroy_io_cq(ena_dev, io_cq); 1714 error: 1715 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 1716 return ret; 1717 } 1718 1719 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) 1720 { 1721 struct ena_com_io_sq *io_sq; 1722 struct ena_com_io_cq *io_cq; 1723 1724 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1725 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n", 1726 qid, ENA_TOTAL_NUM_QUEUES); 1727 return; 1728 } 1729 1730 io_sq = &ena_dev->io_sq_queues[qid]; 1731 io_cq = &ena_dev->io_cq_queues[qid]; 1732 1733 ena_com_destroy_io_sq(ena_dev, io_sq); 1734 ena_com_destroy_io_cq(ena_dev, io_cq); 1735 1736 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 1737 } 1738 1739 int ena_com_get_link_params(struct ena_com_dev *ena_dev, 1740 struct ena_admin_get_feat_resp *resp) 1741 { 1742 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG); 1743 } 1744 1745 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, 1746 struct ena_com_dev_get_features_ctx *get_feat_ctx) 1747 { 1748 struct ena_admin_get_feat_resp get_resp; 1749 int rc; 1750 1751 rc = ena_com_get_feature(ena_dev, &get_resp, 1752 ENA_ADMIN_DEVICE_ATTRIBUTES); 1753 if (rc) 1754 return rc; 1755 1756 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, 1757 sizeof(get_resp.u.dev_attr)); 1758 ena_dev->supported_features = 
get_resp.u.dev_attr.supported_features; 1759 1760 rc = ena_com_get_feature(ena_dev, &get_resp, 1761 ENA_ADMIN_MAX_QUEUES_NUM); 1762 if (rc) 1763 return rc; 1764 1765 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, 1766 sizeof(get_resp.u.max_queue)); 1767 ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size; 1768 1769 rc = ena_com_get_feature(ena_dev, &get_resp, 1770 ENA_ADMIN_AENQ_CONFIG); 1771 if (rc) 1772 return rc; 1773 1774 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, 1775 sizeof(get_resp.u.aenq)); 1776 1777 rc = ena_com_get_feature(ena_dev, &get_resp, 1778 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); 1779 if (rc) 1780 return rc; 1781 1782 memcpy(&get_feat_ctx->offload, &get_resp.u.offload, 1783 sizeof(get_resp.u.offload)); 1784 1785 /* Driver hints isn't mandatory admin command. So in case the 1786 * command isn't supported set driver hints to 0 1787 */ 1788 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS); 1789 1790 if (!rc) 1791 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, 1792 sizeof(get_resp.u.hw_hints)); 1793 else if (rc == ENA_COM_PERMISSION) 1794 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints)); 1795 else 1796 return rc; 1797 1798 return 0; 1799 } 1800 1801 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) 1802 { 1803 ena_com_handle_admin_completion(&ena_dev->admin_queue); 1804 } 1805 1806 /* ena_handle_specific_aenq_event: 1807 * return the handler that is relevant to the specific event group 1808 */ 1809 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev, 1810 u16 group) 1811 { 1812 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers; 1813 1814 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) 1815 return aenq_handlers->handlers[group]; 1816 1817 return aenq_handlers->unimplemented_handler; 1818 } 1819 1820 /* ena_aenq_intr_handler: 1821 * handles the aenq incoming events. 1822 * pop events from the queue and apply the specific handler 1823 */ 1824 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) 1825 { 1826 struct ena_admin_aenq_entry *aenq_e; 1827 struct ena_admin_aenq_common_desc *aenq_common; 1828 struct ena_com_aenq *aenq = &dev->aenq; 1829 ena_aenq_handler handler_cb; 1830 u16 masked_head, processed = 0; 1831 u8 phase; 1832 1833 masked_head = aenq->head & (aenq->q_depth - 1); 1834 phase = aenq->phase; 1835 aenq_e = &aenq->entries[masked_head]; /* Get first entry */ 1836 aenq_common = &aenq_e->aenq_common_desc; 1837 1838 /* Go over all the events */ 1839 while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == 1840 phase) { 1841 ena_trc_dbg("AENQ! 
Group[%x] Syndrom[%x] timestamp: [%jus]\n", 1842 aenq_common->group, 1843 aenq_common->syndrom, 1844 (u64)aenq_common->timestamp_low + 1845 ((u64)aenq_common->timestamp_high << 32)); 1846 1847 /* Handle specific event*/ 1848 handler_cb = ena_com_get_specific_aenq_cb(dev, 1849 aenq_common->group); 1850 handler_cb(data, aenq_e); /* call the actual event handler*/ 1851 1852 /* Get next event entry */ 1853 masked_head++; 1854 processed++; 1855 1856 if (unlikely(masked_head == aenq->q_depth)) { 1857 masked_head = 0; 1858 phase = !phase; 1859 } 1860 aenq_e = &aenq->entries[masked_head]; 1861 aenq_common = &aenq_e->aenq_common_desc; 1862 } 1863 1864 aenq->head += processed; 1865 aenq->phase = phase; 1866 1867 /* Don't update aenq doorbell if there weren't any processed events */ 1868 if (!processed) 1869 return; 1870 1871 /* write the aenq doorbell after all AENQ descriptors were read */ 1872 mb(); 1873 ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1874 } 1875 1876 int ena_com_dev_reset(struct ena_com_dev *ena_dev) 1877 { 1878 u32 stat, timeout, cap, reset_val; 1879 int rc; 1880 1881 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 1882 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 1883 1884 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || 1885 (cap == ENA_MMIO_READ_TIMEOUT))) { 1886 ena_trc_err("Reg read32 timeout occurred\n"); 1887 return ENA_COM_TIMER_EXPIRED; 1888 } 1889 1890 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { 1891 ena_trc_err("Device isn't ready, can't reset device\n"); 1892 return ENA_COM_INVAL; 1893 } 1894 1895 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> 1896 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; 1897 if (timeout == 0) { 1898 ena_trc_err("Invalid timeout value\n"); 1899 return ENA_COM_INVAL; 1900 } 1901 1902 /* start reset */ 1903 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; 1904 ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 1905 1906 /* Write again the MMIO read request address */ 1907 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 1908 1909 rc = wait_for_reset_state(ena_dev, timeout, 1910 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); 1911 if (rc != 0) { 1912 ena_trc_err("Reset indication didn't turn on\n"); 1913 return rc; 1914 } 1915 1916 /* reset done */ 1917 ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 1918 rc = wait_for_reset_state(ena_dev, timeout, 0); 1919 if (rc != 0) { 1920 ena_trc_err("Reset indication didn't turn off\n"); 1921 return rc; 1922 } 1923 1924 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> 1925 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; 1926 if (timeout) 1927 /* the resolution of timeout reg is 100ms */ 1928 ena_dev->admin_queue.completion_timeout = timeout * 100000; 1929 else 1930 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; 1931 1932 return 0; 1933 } 1934 1935 static int ena_get_dev_stats(struct ena_com_dev *ena_dev, 1936 struct ena_com_stats_ctx *ctx, 1937 enum ena_admin_get_stats_type type) 1938 { 1939 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; 1940 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; 1941 struct ena_com_admin_queue *admin_queue; 1942 int ret; 1943 1944 admin_queue = &ena_dev->admin_queue; 1945 1946 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; 1947 get_cmd->aq_common_descriptor.flags = 0; 1948 get_cmd->type = type; 1949 1950 ret = ena_com_execute_admin_command(admin_queue, 1951 (struct ena_admin_aq_entry *)get_cmd, 1952 
sizeof(*get_cmd),
1953 (struct ena_admin_acq_entry *)get_resp,
1954 sizeof(*get_resp));
1955
1956 if (unlikely(ret))
1957 ena_trc_err("Failed to get stats. error: %d\n", ret);
1958
1959 return ret;
1960 }
1961
1962 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
1963 struct ena_admin_basic_stats *stats)
1964 {
1965 struct ena_com_stats_ctx ctx;
1966 int ret;
1967
1968 memset(&ctx, 0x0, sizeof(ctx));
1969 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
1970 if (likely(ret == 0))
1971 memcpy(stats, &ctx.get_resp.basic_stats,
1972 sizeof(ctx.get_resp.basic_stats));
1973
1974 return ret;
1975 }
1976
1977 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
1978 {
1979 struct ena_com_admin_queue *admin_queue;
1980 struct ena_admin_set_feat_cmd cmd;
1981 struct ena_admin_set_feat_resp resp;
1982 int ret;
1983
1984 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
1985 ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
1986 return ENA_COM_PERMISSION;
1987 }
1988
1989 memset(&cmd, 0x0, sizeof(cmd));
1990 admin_queue = &ena_dev->admin_queue;
1991
1992 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1993 cmd.aq_common_descriptor.flags = 0;
1994 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
1995 cmd.u.mtu.mtu = mtu;
1996
1997 ret = ena_com_execute_admin_command(admin_queue,
1998 (struct ena_admin_aq_entry *)&cmd,
1999 sizeof(cmd),
2000 (struct ena_admin_acq_entry *)&resp,
2001 sizeof(resp));
2002
2003 if (unlikely(ret))
2004 ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2005
2006 return ret;
2007 }
2008
2009 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2010 struct ena_admin_feature_offload_desc *offload)
2011 {
2012 int ret;
2013 struct ena_admin_get_feat_resp resp;
2014
2015 ret = ena_com_get_feature(ena_dev, &resp,
2016 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
2017 if (unlikely(ret)) {
2018 ena_trc_err("Failed to get offload capabilities %d\n", ret);
2019 return ret;
2020 }
2021
2022 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2023
2024 return 0;
2025 }
2026
2027 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2028 {
2029 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2030 struct ena_rss *rss = &ena_dev->rss;
2031 struct ena_admin_set_feat_cmd cmd;
2032 struct ena_admin_set_feat_resp resp;
2033 struct ena_admin_get_feat_resp get_resp;
2034 int ret;
2035
2036 if (!ena_com_check_supported_feature_id(ena_dev,
2037 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2038 ena_trc_dbg("Feature %d isn't supported\n",
2039 ENA_ADMIN_RSS_HASH_FUNCTION);
2040 return ENA_COM_PERMISSION;
2041 }
2042
2043 /* Validate hash function is supported */
2044 ret = ena_com_get_feature(ena_dev, &get_resp,
2045 ENA_ADMIN_RSS_HASH_FUNCTION);
2046 if (unlikely(ret))
2047 return ret;
2048
2049 if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
2050 ena_trc_err("Func hash %d isn't supported by device, abort\n",
2051 rss->hash_func);
2052 return ENA_COM_PERMISSION;
2053 }
2054
2055 memset(&cmd, 0x0, sizeof(cmd));
2056
2057 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2058 cmd.aq_common_descriptor.flags =
2059 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2060 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2061 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2062 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2063
2064 ret = ena_com_mem_addr_set(ena_dev,
2065 &cmd.control_buffer.address,
2066 rss->hash_key_dma_addr);
2067 if
(unlikely(ret)) { 2068 ena_trc_err("memory address set failed\n"); 2069 return ret; 2070 } 2071 2072 cmd.control_buffer.length = sizeof(*rss->hash_key); 2073 2074 ret = ena_com_execute_admin_command(admin_queue, 2075 (struct ena_admin_aq_entry *)&cmd, 2076 sizeof(cmd), 2077 (struct ena_admin_acq_entry *)&resp, 2078 sizeof(resp)); 2079 if (unlikely(ret)) { 2080 ena_trc_err("Failed to set hash function %d. error: %d\n", 2081 rss->hash_func, ret); 2082 return ENA_COM_INVAL; 2083 } 2084 2085 return 0; 2086 } 2087 2088 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, 2089 enum ena_admin_hash_functions func, 2090 const u8 *key, u16 key_len, u32 init_val) 2091 { 2092 struct ena_rss *rss = &ena_dev->rss; 2093 struct ena_admin_get_feat_resp get_resp; 2094 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2095 rss->hash_key; 2096 int rc; 2097 2098 /* Make sure size is a mult of DWs */ 2099 if (unlikely(key_len & 0x3)) 2100 return ENA_COM_INVAL; 2101 2102 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2103 ENA_ADMIN_RSS_HASH_FUNCTION, 2104 rss->hash_key_dma_addr, 2105 sizeof(*rss->hash_key)); 2106 if (unlikely(rc)) 2107 return rc; 2108 2109 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { 2110 ena_trc_err("Flow hash function %d isn't supported\n", func); 2111 return ENA_COM_PERMISSION; 2112 } 2113 2114 switch (func) { 2115 case ENA_ADMIN_TOEPLITZ: 2116 if (key_len > sizeof(hash_key->key)) { 2117 ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n", 2118 key_len, sizeof(hash_key->key)); 2119 return ENA_COM_INVAL; 2120 } 2121 2122 memcpy(hash_key->key, key, key_len); 2123 rss->hash_init_val = init_val; 2124 hash_key->keys_num = key_len >> 2; 2125 break; 2126 case ENA_ADMIN_CRC32: 2127 rss->hash_init_val = init_val; 2128 break; 2129 default: 2130 ena_trc_err("Invalid hash function (%d)\n", func); 2131 return ENA_COM_INVAL; 2132 } 2133 2134 rc = ena_com_set_hash_function(ena_dev); 2135 2136 /* Restore the old function */ 2137 if (unlikely(rc)) 2138 ena_com_get_hash_function(ena_dev, NULL, NULL); 2139 2140 return rc; 2141 } 2142 2143 int ena_com_get_hash_function(struct ena_com_dev *ena_dev, 2144 enum ena_admin_hash_functions *func, 2145 u8 *key) 2146 { 2147 struct ena_rss *rss = &ena_dev->rss; 2148 struct ena_admin_get_feat_resp get_resp; 2149 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2150 rss->hash_key; 2151 int rc; 2152 2153 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2154 ENA_ADMIN_RSS_HASH_FUNCTION, 2155 rss->hash_key_dma_addr, 2156 sizeof(*rss->hash_key)); 2157 if (unlikely(rc)) 2158 return rc; 2159 2160 rss->hash_func = get_resp.u.flow_hash_func.selected_func; 2161 if (func) 2162 *func = rss->hash_func; 2163 2164 if (key) 2165 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); 2166 2167 return 0; 2168 } 2169 2170 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, 2171 enum ena_admin_flow_hash_proto proto, 2172 u16 *fields) 2173 { 2174 struct ena_rss *rss = &ena_dev->rss; 2175 struct ena_admin_get_feat_resp get_resp; 2176 int rc; 2177 2178 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2179 ENA_ADMIN_RSS_HASH_INPUT, 2180 rss->hash_ctrl_dma_addr, 2181 sizeof(*rss->hash_ctrl)); 2182 if (unlikely(rc)) 2183 return rc; 2184 2185 if (fields) 2186 *fields = rss->hash_ctrl->selected_fields[proto].fields; 2187 2188 return 0; 2189 } 2190 2191 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) 2192 { 2193 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2194 struct ena_rss *rss = &ena_dev->rss; 2195 
struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2196 struct ena_admin_set_feat_cmd cmd;
2197 struct ena_admin_set_feat_resp resp;
2198 int ret;
2199
2200 if (!ena_com_check_supported_feature_id(ena_dev,
2201 ENA_ADMIN_RSS_HASH_INPUT)) {
2202 ena_trc_dbg("Feature %d isn't supported\n",
2203 ENA_ADMIN_RSS_HASH_INPUT);
2204 return ENA_COM_PERMISSION;
2205 }
2206
2207 memset(&cmd, 0x0, sizeof(cmd));
2208
2209 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2210 cmd.aq_common_descriptor.flags =
2211 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2212 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2213 cmd.u.flow_hash_input.enabled_input_sort =
2214 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2215 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2216
2217 ret = ena_com_mem_addr_set(ena_dev,
2218 &cmd.control_buffer.address,
2219 rss->hash_ctrl_dma_addr);
2220 if (unlikely(ret)) {
2221 ena_trc_err("memory address set failed\n");
2222 return ret;
2223 }
2224 cmd.control_buffer.length = sizeof(*hash_ctrl);
2225
2226 ret = ena_com_execute_admin_command(admin_queue,
2227 (struct ena_admin_aq_entry *)&cmd,
2228 sizeof(cmd),
2229 (struct ena_admin_acq_entry *)&resp,
2230 sizeof(resp));
2231 if (unlikely(ret))
2232 ena_trc_err("Failed to set hash input. error: %d\n", ret);
2233
2234 return ret;
2235 }
2236
2237 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2238 {
2239 struct ena_rss *rss = &ena_dev->rss;
2240 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2241 rss->hash_ctrl;
2242 u16 available_fields = 0;
2243 int rc, i;
2244
2245 /* Get the supported hash input */
2246 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2247 if (unlikely(rc))
2248 return rc;
2249
2250 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2251 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2252 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2253
2254 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2255 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2256 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2257
2258 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2259 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2260 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2261
2262 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2263 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2264 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2265
2266 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2267 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2268
2269 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2270 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2271
2272 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2273 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2274
2275 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2276 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2277
2278 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2279 available_fields = hash_ctrl->selected_fields[i].fields &
2280 hash_ctrl->supported_fields[i].fields;
2281 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2282 ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2283 i, hash_ctrl->supported_fields[i].fields,
2284 hash_ctrl->selected_fields[i].fields);
2285 return ENA_COM_PERMISSION;
2286 }
2287 }
2288
2289 rc = ena_com_set_hash_ctrl(ena_dev);
2290
2291 /* In case of failure, restore the old hash ctrl */
2292 if (unlikely(rc))
2293 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2294
2295 return rc;
2296 }
2297
2298 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2299 enum ena_admin_flow_hash_proto proto,
2300 u16 hash_fields)
2301 {
2302 struct ena_rss *rss = &ena_dev->rss;
2303 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2304 u16 supported_fields;
2305 int rc;
2306
2307 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2308 ena_trc_err("Invalid proto num (%u)\n", proto);
2309 return ENA_COM_INVAL;
2310 }
2311
2312 /* Get the ctrl table */
2313 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2314 if (unlikely(rc))
2315 return rc;
2316
2317 /* Make sure all the fields are supported */
2318 supported_fields = hash_ctrl->supported_fields[proto].fields;
2319 if ((hash_fields & supported_fields) != hash_fields) {
2320 ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2321 proto, hash_fields, supported_fields);
2322 }
2323
2324 hash_ctrl->selected_fields[proto].fields = hash_fields;
2325
2326 rc = ena_com_set_hash_ctrl(ena_dev);
2327
2328 /* In case of failure, restore the old hash ctrl */
2329 if (unlikely(rc))
2330 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2331
2332 return rc;
2333 }
2334
2335 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2336 u16 entry_idx, u16 entry_value)
2337 {
2338 struct ena_rss *rss = &ena_dev->rss;
2339
2340 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2341 return ENA_COM_INVAL;
2342
2343 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2344 return ENA_COM_INVAL;
2345
2346 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2347
2348 return 0;
2349 }
2350
2351 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2352 {
2353 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2354 struct ena_rss *rss = &ena_dev->rss;
2355 struct ena_admin_set_feat_cmd cmd;
2356 struct ena_admin_set_feat_resp resp;
2357 int ret;
2358
2359 if (!ena_com_check_supported_feature_id(ena_dev,
2360 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2361 ena_trc_dbg("Feature %d isn't supported\n",
2362 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2363 return ENA_COM_PERMISSION;
2364 }
2365
2366 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2367 if (ret) {
2368 ena_trc_err("Failed to convert host indirection table to device table\n");
2369 return ret;
2370 }
2371
2372 memset(&cmd, 0x0, sizeof(cmd));
2373
2374 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2375 cmd.aq_common_descriptor.flags =
2376 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2377 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2378 cmd.u.ind_table.size = rss->tbl_log_size;
2379 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2380
2381 ret = ena_com_mem_addr_set(ena_dev,
2382 &cmd.control_buffer.address,
2383 rss->rss_ind_tbl_dma_addr);
2384 if (unlikely(ret)) {
2385 ena_trc_err("memory address set failed\n");
2386 return ret;
2387 }
2388
2389 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2390 sizeof(struct ena_admin_rss_ind_table_entry);
2391
2392 ret = ena_com_execute_admin_command(admin_queue,
2393 (struct ena_admin_aq_entry *)&cmd,
2394 sizeof(cmd),
2395 (struct ena_admin_acq_entry *)&resp,
2396 sizeof(resp));
2397
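/* Editorial note (added): ena_com_execute_admin_command() is synchronous --
 * it returns only after the SET_FEATURE completion is received or the admin
 * command times out, so a non-zero ret below already reflects the device's
 * response to the new redirection table.
 */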
2398 if (unlikely(ret)) 2399 ena_trc_err("Failed to set indirect table. error: %d\n", ret); 2400 2401 return ret; 2402 } 2403 2404 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) 2405 { 2406 struct ena_rss *rss = &ena_dev->rss; 2407 struct ena_admin_get_feat_resp get_resp; 2408 u32 tbl_size; 2409 int i, rc; 2410 2411 tbl_size = (1ULL << rss->tbl_log_size) * 2412 sizeof(struct ena_admin_rss_ind_table_entry); 2413 2414 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2415 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 2416 rss->rss_ind_tbl_dma_addr, 2417 tbl_size); 2418 if (unlikely(rc)) 2419 return rc; 2420 2421 if (!ind_tbl) 2422 return 0; 2423 2424 rc = ena_com_ind_tbl_convert_from_device(ena_dev); 2425 if (unlikely(rc)) 2426 return rc; 2427 2428 for (i = 0; i < (1 << rss->tbl_log_size); i++) 2429 ind_tbl[i] = rss->host_rss_ind_tbl[i]; 2430 2431 return 0; 2432 } 2433 2434 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) 2435 { 2436 int rc; 2437 2438 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2439 2440 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); 2441 if (unlikely(rc)) 2442 goto err_indr_tbl; 2443 2444 rc = ena_com_hash_key_allocate(ena_dev); 2445 if (unlikely(rc)) 2446 goto err_hash_key; 2447 2448 rc = ena_com_hash_ctrl_init(ena_dev); 2449 if (unlikely(rc)) 2450 goto err_hash_ctrl; 2451 2452 return 0; 2453 2454 err_hash_ctrl: 2455 ena_com_hash_key_destroy(ena_dev); 2456 err_hash_key: 2457 ena_com_indirect_table_destroy(ena_dev); 2458 err_indr_tbl: 2459 2460 return rc; 2461 } 2462 2463 void ena_com_rss_destroy(struct ena_com_dev *ena_dev) 2464 { 2465 ena_com_indirect_table_destroy(ena_dev); 2466 ena_com_hash_key_destroy(ena_dev); 2467 ena_com_hash_ctrl_destroy(ena_dev); 2468 2469 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2470 } 2471 2472 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) 2473 { 2474 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2475 2476 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 2477 SZ_4K, 2478 host_attr->host_info, 2479 host_attr->host_info_dma_addr, 2480 host_attr->host_info_dma_handle); 2481 if (unlikely(!host_attr->host_info)) 2482 return ENA_COM_NO_MEM; 2483 2484 return 0; 2485 } 2486 2487 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, 2488 u32 debug_area_size) 2489 { 2490 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2491 2492 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 2493 debug_area_size, 2494 host_attr->debug_area_virt_addr, 2495 host_attr->debug_area_dma_addr, 2496 host_attr->debug_area_dma_handle); 2497 if (unlikely(!host_attr->debug_area_virt_addr)) { 2498 host_attr->debug_area_size = 0; 2499 return ENA_COM_NO_MEM; 2500 } 2501 2502 host_attr->debug_area_size = debug_area_size; 2503 2504 return 0; 2505 } 2506 2507 void ena_com_delete_host_info(struct ena_com_dev *ena_dev) 2508 { 2509 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2510 2511 if (host_attr->host_info) { 2512 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 2513 SZ_4K, 2514 host_attr->host_info, 2515 host_attr->host_info_dma_addr, 2516 host_attr->host_info_dma_handle); 2517 host_attr->host_info = NULL; 2518 } 2519 } 2520 2521 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) 2522 { 2523 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2524 2525 if (host_attr->debug_area_virt_addr) { 2526 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 2527 host_attr->debug_area_size, 2528 host_attr->debug_area_virt_addr, 2529 host_attr->debug_area_dma_addr, 2530 
host_attr->debug_area_dma_handle); 2531 host_attr->debug_area_virt_addr = NULL; 2532 } 2533 } 2534 2535 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) 2536 { 2537 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2538 struct ena_com_admin_queue *admin_queue; 2539 struct ena_admin_set_feat_cmd cmd; 2540 struct ena_admin_set_feat_resp resp; 2541 2542 int ret; 2543 2544 /* Host attribute config is called before ena_com_get_dev_attr_feat 2545 * so ena_com can't check if the feature is supported. 2546 */ 2547 2548 memset(&cmd, 0x0, sizeof(cmd)); 2549 admin_queue = &ena_dev->admin_queue; 2550 2551 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2552 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; 2553 2554 ret = ena_com_mem_addr_set(ena_dev, 2555 &cmd.u.host_attr.debug_ba, 2556 host_attr->debug_area_dma_addr); 2557 if (unlikely(ret)) { 2558 ena_trc_err("memory address set failed\n"); 2559 return ret; 2560 } 2561 2562 ret = ena_com_mem_addr_set(ena_dev, 2563 &cmd.u.host_attr.os_info_ba, 2564 host_attr->host_info_dma_addr); 2565 if (unlikely(ret)) { 2566 ena_trc_err("memory address set failed\n"); 2567 return ret; 2568 } 2569 2570 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; 2571 2572 ret = ena_com_execute_admin_command(admin_queue, 2573 (struct ena_admin_aq_entry *)&cmd, 2574 sizeof(cmd), 2575 (struct ena_admin_acq_entry *)&resp, 2576 sizeof(resp)); 2577 2578 if (unlikely(ret)) 2579 ena_trc_err("Failed to set host attributes: %d\n", ret); 2580 2581 return ret; 2582 } 2583 2584 /* Interrupt moderation */ 2585 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) 2586 { 2587 return ena_com_check_supported_feature_id(ena_dev, 2588 ENA_ADMIN_INTERRUPT_MODERATION); 2589 } 2590 2591 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, 2592 u32 tx_coalesce_usecs) 2593 { 2594 if (!ena_dev->intr_delay_resolution) { 2595 ena_trc_err("Illegal interrupt delay granularity value\n"); 2596 return ENA_COM_FAULT; 2597 } 2598 2599 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs / 2600 ena_dev->intr_delay_resolution; 2601 2602 return 0; 2603 } 2604 2605 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, 2606 u32 rx_coalesce_usecs) 2607 { 2608 if (!ena_dev->intr_delay_resolution) { 2609 ena_trc_err("Illegal interrupt delay granularity value\n"); 2610 return ENA_COM_FAULT; 2611 } 2612 2613 /* We use LOWEST entry of moderation table for storing 2614 * nonadaptive interrupt coalescing values 2615 */ 2616 ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = 2617 rx_coalesce_usecs / ena_dev->intr_delay_resolution; 2618 2619 return 0; 2620 } 2621 2622 void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev) 2623 { 2624 if (ena_dev->intr_moder_tbl) 2625 ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl); 2626 ena_dev->intr_moder_tbl = NULL; 2627 } 2628 2629 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) 2630 { 2631 struct ena_admin_get_feat_resp get_resp; 2632 u16 delay_resolution; 2633 int rc; 2634 2635 rc = ena_com_get_feature(ena_dev, &get_resp, 2636 ENA_ADMIN_INTERRUPT_MODERATION); 2637 2638 if (rc) { 2639 if (rc == ENA_COM_PERMISSION) { 2640 ena_trc_dbg("Feature %d isn't supported\n", 2641 ENA_ADMIN_INTERRUPT_MODERATION); 2642 rc = 0; 2643 } else { 2644 ena_trc_err("Failed to get interrupt moderation admin cmd. 
rc: %d\n", 2645 rc); 2646 } 2647 2648 /* no moderation supported, disable adaptive support */ 2649 ena_com_disable_adaptive_moderation(ena_dev); 2650 return rc; 2651 } 2652 2653 rc = ena_com_init_interrupt_moderation_table(ena_dev); 2654 if (rc) 2655 goto err; 2656 2657 /* if moderation is supported by device we set adaptive moderation */ 2658 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; 2659 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); 2660 ena_com_enable_adaptive_moderation(ena_dev); 2661 2662 return 0; 2663 err: 2664 ena_com_destroy_interrupt_moderation(ena_dev); 2665 return rc; 2666 } 2667 2668 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) 2669 { 2670 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2671 2672 if (!intr_moder_tbl) 2673 return; 2674 2675 intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = 2676 ENA_INTR_LOWEST_USECS; 2677 intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval = 2678 ENA_INTR_LOWEST_PKTS; 2679 intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval = 2680 ENA_INTR_LOWEST_BYTES; 2681 2682 intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval = 2683 ENA_INTR_LOW_USECS; 2684 intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval = 2685 ENA_INTR_LOW_PKTS; 2686 intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval = 2687 ENA_INTR_LOW_BYTES; 2688 2689 intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval = 2690 ENA_INTR_MID_USECS; 2691 intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval = 2692 ENA_INTR_MID_PKTS; 2693 intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval = 2694 ENA_INTR_MID_BYTES; 2695 2696 intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval = 2697 ENA_INTR_HIGH_USECS; 2698 intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval = 2699 ENA_INTR_HIGH_PKTS; 2700 intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval = 2701 ENA_INTR_HIGH_BYTES; 2702 2703 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval = 2704 ENA_INTR_HIGHEST_USECS; 2705 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval = 2706 ENA_INTR_HIGHEST_PKTS; 2707 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval = 2708 ENA_INTR_HIGHEST_BYTES; 2709 } 2710 2711 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) 2712 { 2713 return ena_dev->intr_moder_tx_interval; 2714 } 2715 2716 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) 2717 { 2718 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2719 2720 if (intr_moder_tbl) 2721 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval; 2722 2723 return 0; 2724 } 2725 2726 void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, 2727 enum ena_intr_moder_level level, 2728 struct ena_intr_moder_entry *entry) 2729 { 2730 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2731 2732 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) 2733 return; 2734 2735 intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval; 2736 if (ena_dev->intr_delay_resolution) 2737 intr_moder_tbl[level].intr_moder_interval /= 2738 ena_dev->intr_delay_resolution; 2739 intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval; 2740 2741 /* use hardcoded value until ethtool supports bytecount parameter */ 2742 if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED) 2743 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval; 2744 } 2745 2746 void 
ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, 2747 enum ena_intr_moder_level level, 2748 struct ena_intr_moder_entry *entry) 2749 { 2750 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2751 2752 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) 2753 return; 2754 2755 entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval; 2756 if (ena_dev->intr_delay_resolution) 2757 entry->intr_moder_interval *= ena_dev->intr_delay_resolution; 2758 entry->pkts_per_interval = 2759 intr_moder_tbl[level].pkts_per_interval; 2760 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; 2761 } 2762
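/*
 * Editorial usage sketch (illustrative only, not part of the original driver
 * sources).  It shows one way a client driver could combine the RSS and
 * interrupt moderation helpers defined above.  The function name
 * example_rss_setup(), EXAMPLE_IND_TBL_LOG_SIZE and the choice of CRC32 are
 * hypothetical; an already initialized struct ena_com_dev is assumed and
 * error handling is kept minimal.
 *
 *	#define EXAMPLE_IND_TBL_LOG_SIZE 7	// hypothetical 128-entry table
 *
 *	static int example_rss_setup(struct ena_com_dev *ena_dev, u16 num_queues)
 *	{
 *		u16 i;
 *		int rc;
 *
 *		// Allocate the indirection table, hash key and hash control buffers
 *		rc = ena_com_rss_init(ena_dev, EXAMPLE_IND_TBL_LOG_SIZE);
 *		if (rc)
 *			return rc;
 *
 *		// Spread the table entries across the available queues (num_queues >= 1)
 *		for (i = 0; i < (1 << EXAMPLE_IND_TBL_LOG_SIZE); i++) {
 *			rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *							       i % num_queues);
 *			if (rc)
 *				goto err;
 *		}
 *
 *		// Push the host table to the device via SET_FEATURE
 *		rc = ena_com_indirect_table_set(ena_dev);
 *		if (rc)
 *			goto err;
 *
 *		// CRC32 needs no key; Toeplitz would also pass key and key_len
 *		rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32,
 *						NULL, 0, 0);
 *		if (rc)
 *			goto err;
 *
 *		// Program the default per-protocol hash input fields
 *		rc = ena_com_set_default_hash_ctrl(ena_dev);
 *		if (rc)
 *			goto err;
 *
 *		// Query the device's delay resolution and set up the moderation table
 *		rc = ena_com_init_interrupt_moderation(ena_dev);
 *		if (rc)
 *			goto err;
 *
 *		return 0;
 *	err:
 *		ena_com_rss_destroy(ena_dev);
 *		return rc;
 *	}
 */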