1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2015-2021 Amazon.com, Inc. or its affiliates. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of copyright holder nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "ena_com.h" 35 36 /*****************************************************************************/ 37 /*****************************************************************************/ 38 39 /* Timeout in micro-sec */ 40 #define ADMIN_CMD_TIMEOUT_US (3000000) 41 42 #define ENA_ASYNC_QUEUE_DEPTH 16 43 #define ENA_ADMIN_QUEUE_DEPTH 32 44 45 #ifdef ENA_EXTENDED_STATS 46 47 #define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08 48 #define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF) 49 #define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16) 50 51 #endif /* ENA_EXTENDED_STATS */ 52 53 #define ENA_CTRL_MAJOR 0 54 #define ENA_CTRL_MINOR 0 55 #define ENA_CTRL_SUB_MINOR 1 56 57 #define MIN_ENA_CTRL_VER \ 58 (((ENA_CTRL_MAJOR) << \ 59 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ 60 ((ENA_CTRL_MINOR) << \ 61 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ 62 (ENA_CTRL_SUB_MINOR)) 63 64 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) 65 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) 66 67 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF 68 69 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 70 71 #define ENA_REGS_ADMIN_INTR_MASK 1 72 73 #define ENA_MIN_ADMIN_POLL_US 100 74 75 #define ENA_MAX_ADMIN_POLL_US 5000 76 77 /*****************************************************************************/ 78 /*****************************************************************************/ 79 /*****************************************************************************/ 80 81 enum ena_cmd_status { 82 ENA_CMD_SUBMITTED, 83 ENA_CMD_COMPLETED, 84 /* Abort - canceled by the driver */ 85 ENA_CMD_ABORTED, 86 }; 87 88 struct ena_comp_ctx { 89 ena_wait_event_t wait_event; 90 struct ena_admin_acq_entry *user_cqe; 91 u32 comp_size; 92 enum ena_cmd_status status; 93 /* status from the device */ 94 u8 
comp_status; 95 u8 cmd_opcode; 96 bool occupied; 97 }; 98 99 struct ena_com_stats_ctx { 100 struct ena_admin_aq_get_stats_cmd get_cmd; 101 struct ena_admin_acq_get_stats_resp get_resp; 102 }; 103 104 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, 105 struct ena_common_mem_addr *ena_addr, 106 dma_addr_t addr) 107 { 108 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { 109 ena_trc_err(ena_dev, "DMA address has more bits that the device supports\n"); 110 return ENA_COM_INVAL; 111 } 112 113 ena_addr->mem_addr_low = lower_32_bits(addr); 114 ena_addr->mem_addr_high = (u16)upper_32_bits(addr); 115 116 return 0; 117 } 118 119 static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) 120 { 121 struct ena_com_dev *ena_dev = admin_queue->ena_dev; 122 struct ena_com_admin_sq *sq = &admin_queue->sq; 123 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); 124 125 ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr, 126 sq->mem_handle); 127 128 if (!sq->entries) { 129 ena_trc_err(ena_dev, "Memory allocation failed\n"); 130 return ENA_COM_NO_MEM; 131 } 132 133 sq->head = 0; 134 sq->tail = 0; 135 sq->phase = 1; 136 137 sq->db_addr = NULL; 138 139 return 0; 140 } 141 142 static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue) 143 { 144 struct ena_com_dev *ena_dev = admin_queue->ena_dev; 145 struct ena_com_admin_cq *cq = &admin_queue->cq; 146 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); 147 148 ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr, 149 cq->mem_handle); 150 151 if (!cq->entries) { 152 ena_trc_err(ena_dev, "Memory allocation failed\n"); 153 return ENA_COM_NO_MEM; 154 } 155 156 cq->head = 0; 157 cq->phase = 1; 158 159 return 0; 160 } 161 162 static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, 163 struct ena_aenq_handlers *aenq_handlers) 164 { 165 struct ena_com_aenq *aenq = &ena_dev->aenq; 166 u32 addr_low, addr_high, aenq_caps; 167 u16 size; 168 169 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 170 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 171 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size, 172 aenq->entries, 173 aenq->dma_addr, 174 aenq->mem_handle); 175 176 if (!aenq->entries) { 177 ena_trc_err(ena_dev, "Memory allocation failed\n"); 178 return ENA_COM_NO_MEM; 179 } 180 181 aenq->head = aenq->q_depth; 182 aenq->phase = 1; 183 184 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); 185 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); 186 187 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); 188 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); 189 190 aenq_caps = 0; 191 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; 192 aenq_caps |= (sizeof(struct ena_admin_aenq_entry) << 193 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & 194 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; 195 ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); 196 197 if (unlikely(!aenq_handlers)) { 198 ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n"); 199 return ENA_COM_INVAL; 200 } 201 202 aenq->aenq_handlers = aenq_handlers; 203 204 return 0; 205 } 206 207 static void comp_ctxt_release(struct ena_com_admin_queue *queue, 208 struct ena_comp_ctx *comp_ctx) 209 { 210 comp_ctx->occupied = false; 211 ATOMIC32_DEC(&queue->outstanding_cmds); 212 } 213 214 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue, 215 u16 command_id, bool 
capture) 216 { 217 if (unlikely(command_id >= admin_queue->q_depth)) { 218 ena_trc_err(admin_queue->ena_dev, 219 "Command id is larger than the queue size. cmd_id: %u queue size %d\n", 220 command_id, admin_queue->q_depth); 221 return NULL; 222 } 223 224 if (unlikely(!admin_queue->comp_ctx)) { 225 ena_trc_err(admin_queue->ena_dev, 226 "Completion context is NULL\n"); 227 return NULL; 228 } 229 230 if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) { 231 ena_trc_err(admin_queue->ena_dev, 232 "Completion context is occupied\n"); 233 return NULL; 234 } 235 236 if (capture) { 237 ATOMIC32_INC(&admin_queue->outstanding_cmds); 238 admin_queue->comp_ctx[command_id].occupied = true; 239 } 240 241 return &admin_queue->comp_ctx[command_id]; 242 } 243 244 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 245 struct ena_admin_aq_entry *cmd, 246 size_t cmd_size_in_bytes, 247 struct ena_admin_acq_entry *comp, 248 size_t comp_size_in_bytes) 249 { 250 struct ena_comp_ctx *comp_ctx; 251 u16 tail_masked, cmd_id; 252 u16 queue_size_mask; 253 u16 cnt; 254 255 queue_size_mask = admin_queue->q_depth - 1; 256 257 tail_masked = admin_queue->sq.tail & queue_size_mask; 258 259 /* In case of queue FULL */ 260 cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds); 261 if (cnt >= admin_queue->q_depth) { 262 ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n"); 263 admin_queue->stats.out_of_space++; 264 return ERR_PTR(ENA_COM_NO_SPACE); 265 } 266 267 cmd_id = admin_queue->curr_cmd_id; 268 269 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & 270 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; 271 272 cmd->aq_common_descriptor.command_id |= cmd_id & 273 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; 274 275 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); 276 if (unlikely(!comp_ctx)) 277 return ERR_PTR(ENA_COM_INVAL); 278 279 comp_ctx->status = ENA_CMD_SUBMITTED; 280 comp_ctx->comp_size = (u32)comp_size_in_bytes; 281 comp_ctx->user_cqe = comp; 282 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; 283 284 ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event); 285 286 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); 287 288 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & 289 queue_size_mask; 290 291 admin_queue->sq.tail++; 292 admin_queue->stats.submitted_cmd++; 293 294 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) 295 admin_queue->sq.phase = !admin_queue->sq.phase; 296 297 ENA_DB_SYNC(&admin_queue->sq.mem_handle); 298 ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail, 299 admin_queue->sq.db_addr); 300 301 return comp_ctx; 302 } 303 304 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue) 305 { 306 struct ena_com_dev *ena_dev = admin_queue->ena_dev; 307 size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx); 308 struct ena_comp_ctx *comp_ctx; 309 u16 i; 310 311 admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size); 312 if (unlikely(!admin_queue->comp_ctx)) { 313 ena_trc_err(ena_dev, "Memory allocation failed\n"); 314 return ENA_COM_NO_MEM; 315 } 316 317 for (i = 0; i < admin_queue->q_depth; i++) { 318 comp_ctx = get_comp_ctxt(admin_queue, i, false); 319 if (comp_ctx) 320 ENA_WAIT_EVENT_INIT(comp_ctx->wait_event); 321 } 322 323 return 0; 324 } 325 326 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 327 struct ena_admin_aq_entry *cmd, 328 size_t cmd_size_in_bytes, 329 struct ena_admin_acq_entry *comp, 330 size_t 
comp_size_in_bytes) 331 { 332 unsigned long flags = 0; 333 struct ena_comp_ctx *comp_ctx; 334 335 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 336 if (unlikely(!admin_queue->running_state)) { 337 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 338 return ERR_PTR(ENA_COM_NO_DEVICE); 339 } 340 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, 341 cmd_size_in_bytes, 342 comp, 343 comp_size_in_bytes); 344 if (IS_ERR(comp_ctx)) 345 admin_queue->running_state = false; 346 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 347 348 return comp_ctx; 349 } 350 351 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, 352 struct ena_com_create_io_ctx *ctx, 353 struct ena_com_io_sq *io_sq) 354 { 355 size_t size; 356 int dev_node = 0; 357 358 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); 359 360 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; 361 io_sq->desc_entry_size = 362 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 363 sizeof(struct ena_eth_io_tx_desc) : 364 sizeof(struct ena_eth_io_rx_desc); 365 366 size = io_sq->desc_entry_size * io_sq->q_depth; 367 io_sq->bus = ena_dev->bus; 368 369 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 370 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, 371 size, 372 io_sq->desc_addr.virt_addr, 373 io_sq->desc_addr.phys_addr, 374 io_sq->desc_addr.mem_handle, 375 ctx->numa_node, 376 dev_node); 377 if (!io_sq->desc_addr.virt_addr) { 378 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 379 size, 380 io_sq->desc_addr.virt_addr, 381 io_sq->desc_addr.phys_addr, 382 io_sq->desc_addr.mem_handle); 383 } 384 385 if (!io_sq->desc_addr.virt_addr) { 386 ena_trc_err(ena_dev, "Memory allocation failed\n"); 387 return ENA_COM_NO_MEM; 388 } 389 } 390 391 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 392 /* Allocate bounce buffers */ 393 io_sq->bounce_buf_ctrl.buffer_size = 394 ena_dev->llq_info.desc_list_entry_size; 395 io_sq->bounce_buf_ctrl.buffers_num = 396 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; 397 io_sq->bounce_buf_ctrl.next_to_use = 0; 398 399 size = io_sq->bounce_buf_ctrl.buffer_size * 400 io_sq->bounce_buf_ctrl.buffers_num; 401 402 ENA_MEM_ALLOC_NODE(ena_dev->dmadev, 403 size, 404 io_sq->bounce_buf_ctrl.base_buffer, 405 ctx->numa_node, 406 dev_node); 407 if (!io_sq->bounce_buf_ctrl.base_buffer) 408 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size); 409 410 if (!io_sq->bounce_buf_ctrl.base_buffer) { 411 ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n"); 412 return ENA_COM_NO_MEM; 413 } 414 415 memcpy(&io_sq->llq_info, &ena_dev->llq_info, 416 sizeof(io_sq->llq_info)); 417 418 /* Initiate the first bounce buffer */ 419 io_sq->llq_buf_ctrl.curr_bounce_buf = 420 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); 421 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, 422 0x0, io_sq->llq_info.desc_list_entry_size); 423 io_sq->llq_buf_ctrl.descs_left_in_line = 424 io_sq->llq_info.descs_num_before_header; 425 io_sq->disable_meta_caching = 426 io_sq->llq_info.disable_meta_caching; 427 428 if (io_sq->llq_info.max_entries_in_tx_burst > 0) 429 io_sq->entries_in_tx_burst_left = 430 io_sq->llq_info.max_entries_in_tx_burst; 431 } 432 433 io_sq->tail = 0; 434 io_sq->next_to_comp = 0; 435 io_sq->phase = 1; 436 437 return 0; 438 } 439 440 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, 441 struct ena_com_create_io_ctx *ctx, 442 struct ena_com_io_cq *io_cq) 443 { 444 size_t size; 445 int prev_node = 0; 446 447 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); 448 449 /* Use the basic 
completion descriptor for Rx */ 450 io_cq->cdesc_entry_size_in_bytes = 451 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 452 sizeof(struct ena_eth_io_tx_cdesc) : 453 sizeof(struct ena_eth_io_rx_cdesc_base); 454 455 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; 456 io_cq->bus = ena_dev->bus; 457 458 ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev, 459 size, 460 io_cq->cdesc_addr.virt_addr, 461 io_cq->cdesc_addr.phys_addr, 462 io_cq->cdesc_addr.mem_handle, 463 ctx->numa_node, 464 prev_node, 465 ENA_CDESC_RING_SIZE_ALIGNMENT); 466 if (!io_cq->cdesc_addr.virt_addr) { 467 ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev, 468 size, 469 io_cq->cdesc_addr.virt_addr, 470 io_cq->cdesc_addr.phys_addr, 471 io_cq->cdesc_addr.mem_handle, 472 ENA_CDESC_RING_SIZE_ALIGNMENT); 473 } 474 475 if (!io_cq->cdesc_addr.virt_addr) { 476 ena_trc_err(ena_dev, "Memory allocation failed\n"); 477 return ENA_COM_NO_MEM; 478 } 479 480 io_cq->phase = 1; 481 io_cq->head = 0; 482 483 return 0; 484 } 485 486 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, 487 struct ena_admin_acq_entry *cqe) 488 { 489 struct ena_comp_ctx *comp_ctx; 490 u16 cmd_id; 491 492 cmd_id = cqe->acq_common_descriptor.command & 493 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; 494 495 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); 496 if (unlikely(!comp_ctx)) { 497 ena_trc_err(admin_queue->ena_dev, 498 "comp_ctx is NULL. Changing the admin queue running state\n"); 499 admin_queue->running_state = false; 500 return; 501 } 502 503 comp_ctx->status = ENA_CMD_COMPLETED; 504 comp_ctx->comp_status = cqe->acq_common_descriptor.status; 505 506 if (comp_ctx->user_cqe) 507 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); 508 509 if (!admin_queue->polling) 510 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); 511 } 512 513 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) 514 { 515 struct ena_admin_acq_entry *cqe = NULL; 516 u16 comp_num = 0; 517 u16 head_masked; 518 u8 phase; 519 520 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); 521 phase = admin_queue->cq.phase; 522 523 cqe = &admin_queue->cq.entries[head_masked]; 524 525 /* Go over all the completions */ 526 while ((READ_ONCE8(cqe->acq_common_descriptor.flags) & 527 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 528 /* Do not read the rest of the completion entry before the 529 * phase bit was validated 530 */ 531 dma_rmb(); 532 ena_com_handle_single_admin_completion(admin_queue, cqe); 533 534 head_masked++; 535 comp_num++; 536 if (unlikely(head_masked == admin_queue->q_depth)) { 537 head_masked = 0; 538 phase = !phase; 539 } 540 541 cqe = &admin_queue->cq.entries[head_masked]; 542 } 543 544 admin_queue->cq.head += comp_num; 545 admin_queue->cq.phase = phase; 546 admin_queue->sq.head += comp_num; 547 admin_queue->stats.completed_cmd += comp_num; 548 } 549 550 static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, 551 u8 comp_status) 552 { 553 if (unlikely(comp_status != 0)) 554 ena_trc_err(admin_queue->ena_dev, 555 "Admin command failed[%u]\n", comp_status); 556 557 switch (comp_status) { 558 case ENA_ADMIN_SUCCESS: 559 return ENA_COM_OK; 560 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: 561 return ENA_COM_NO_MEM; 562 case ENA_ADMIN_UNSUPPORTED_OPCODE: 563 return ENA_COM_UNSUPPORTED; 564 case ENA_ADMIN_BAD_OPCODE: 565 case ENA_ADMIN_MALFORMED_REQUEST: 566 case ENA_ADMIN_ILLEGAL_PARAMETER: 567 case ENA_ADMIN_UNKNOWN_ERROR: 568 return ENA_COM_INVAL; 569 
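/* A busy device resource is a transient condition, so it is mapped to a retryable error below */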
case ENA_ADMIN_RESOURCE_BUSY: 570 return ENA_COM_TRY_AGAIN; 571 } 572 573 return ENA_COM_INVAL; 574 } 575 576 static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) 577 { 578 delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us); 579 delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); 580 ENA_USLEEP(delay_us); 581 } 582 583 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, 584 struct ena_com_admin_queue *admin_queue) 585 { 586 unsigned long flags = 0; 587 ena_time_t timeout; 588 int ret; 589 u32 exp = 0; 590 591 timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout); 592 593 while (1) { 594 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 595 ena_com_handle_admin_completion(admin_queue); 596 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 597 598 if (comp_ctx->status != ENA_CMD_SUBMITTED) 599 break; 600 601 if (ENA_TIME_EXPIRE(timeout)) { 602 ena_trc_err(admin_queue->ena_dev, 603 "Wait for completion (polling) timeout\n"); 604 /* ENA didn't have any completion */ 605 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 606 admin_queue->stats.no_completion++; 607 admin_queue->running_state = false; 608 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 609 610 ret = ENA_COM_TIMER_EXPIRED; 611 goto err; 612 } 613 614 ena_delay_exponential_backoff_us(exp++, 615 admin_queue->ena_dev->ena_min_poll_delay_us); 616 } 617 618 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { 619 ena_trc_err(admin_queue->ena_dev, "Command was aborted\n"); 620 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 621 admin_queue->stats.aborted_cmd++; 622 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 623 ret = ENA_COM_NO_DEVICE; 624 goto err; 625 } 626 627 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED, 628 admin_queue->ena_dev, "Invalid comp status %d\n", 629 comp_ctx->status); 630 631 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); 632 err: 633 comp_ctxt_release(admin_queue, comp_ctx); 634 return ret; 635 } 636 637 /* 638 * Set the LLQ configurations of the firmware 639 * 640 * The driver provides only the enabled feature values to the device, 641 * which in turn, checks if they are supported. 
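 * The enabled values come from ena_com_config_llq_info() below, which first
 * negotiates each field against the llq_features the device reports.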
642 */ 643 static int ena_com_set_llq(struct ena_com_dev *ena_dev) 644 { 645 struct ena_com_admin_queue *admin_queue; 646 struct ena_admin_set_feat_cmd cmd; 647 struct ena_admin_set_feat_resp resp; 648 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 649 int ret; 650 651 memset(&cmd, 0x0, sizeof(cmd)); 652 admin_queue = &ena_dev->admin_queue; 653 654 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 655 cmd.feat_common.feature_id = ENA_ADMIN_LLQ; 656 657 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; 658 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; 659 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; 660 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; 661 662 cmd.u.llq.accel_mode.u.set.enabled_flags = 663 BIT(ENA_ADMIN_DISABLE_META_CACHING) | 664 BIT(ENA_ADMIN_LIMIT_TX_BURST); 665 666 ret = ena_com_execute_admin_command(admin_queue, 667 (struct ena_admin_aq_entry *)&cmd, 668 sizeof(cmd), 669 (struct ena_admin_acq_entry *)&resp, 670 sizeof(resp)); 671 672 if (unlikely(ret)) 673 ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret); 674 675 return ret; 676 } 677 678 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, 679 struct ena_admin_feature_llq_desc *llq_features, 680 struct ena_llq_configurations *llq_default_cfg) 681 { 682 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 683 struct ena_admin_accel_mode_get llq_accel_mode_get; 684 u16 supported_feat; 685 int rc; 686 687 memset(llq_info, 0, sizeof(*llq_info)); 688 689 supported_feat = llq_features->header_location_ctrl_supported; 690 691 if (likely(supported_feat & llq_default_cfg->llq_header_location)) { 692 llq_info->header_location_ctrl = 693 llq_default_cfg->llq_header_location; 694 } else { 695 ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n", 696 supported_feat); 697 return ENA_COM_INVAL; 698 } 699 700 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { 701 supported_feat = llq_features->descriptors_stride_ctrl_supported; 702 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { 703 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; 704 } else { 705 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { 706 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 707 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { 708 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; 709 } else { 710 ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n", 711 supported_feat); 712 return ENA_COM_INVAL; 713 } 714 715 ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", 716 llq_default_cfg->llq_stride_ctrl, 717 supported_feat, 718 llq_info->desc_stride_ctrl); 719 } 720 } else { 721 llq_info->desc_stride_ctrl = 0; 722 } 723 724 supported_feat = llq_features->entry_size_ctrl_supported; 725 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { 726 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; 727 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; 728 } else { 729 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { 730 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 731 llq_info->desc_list_entry_size = 128; 732 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { 733 llq_info->desc_list_entry_size_ctrl = 
ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
				supported_feat);
			return ENA_COM_INVAL;
		}

		ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			llq_default_cfg->llq_ring_entry_size,
			supported_feat,
			llq_info->desc_list_entry_size);
	}
	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from __iowrite64_copy()
		 */
		ena_trc_err(ena_dev, "Illegal entry size %d\n",
			llq_info->desc_list_entry_size);
		return ENA_COM_INVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
				supported_feat);
			return ENA_COM_INVAL;
		}

		ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			llq_default_cfg->llq_num_decs_before_header,
			supported_feat,
			llq_info->descs_num_before_header);
	}
	/* Check if the accelerated queue mode is supported */
	llq_accel_mode_get = llq_features->accel_mode.u.get;

	llq_info->disable_meta_caching =
		!!(llq_accel_mode_get.supported_flags &
		   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
		llq_info->max_entries_in_tx_burst =
			llq_accel_mode_get.max_tx_burst_size /
			llq_default_cfg->llq_ring_entry_size_value;

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);

	return rc;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							 struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed, find out the root cause.
	 * There might be two kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't get any MSI-X interrupt.
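	 * In the first case the admin queue is marked as not running; in the
	 * second, the driver can fall back to polling mode when auto-polling is enabled.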
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			ena_trc_err(admin_queue->ena_dev,
				"The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
				comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			ena_trc_err(admin_queue->ena_dev,
				"The ena device didn't send a completion for the admin cmd %d status %d\n",
				comp_ctx->cmd_opcode, comp_ctx->status);
		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an interrupt
		 * and autopolling mode is enabled. Continue normal execution in such a case.
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads the hardware device register through posting writes
 * and waiting for a response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	ENA_MIGHT_SLEEP();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err(ena_dev, "Reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			mmio_read->seq_num,
			offset,
			read_resp->req_id,
			read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}

/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
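 * (Illustrative usage, not taken from this file's callers: a driver that cannot
 * rely on MSI-X may call ena_com_set_admin_polling_mode(ena_dev, true) before
 * issuing commands through ena_com_execute_admin_command().)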
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_sq->desc_addr.virt_addr,
				      io_sq->desc_addr.phys_addr,
				      io_sq->desc_addr.mem_handle);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		ENA_MEM_FREE(ena_dev->dmadev,
			     io_sq->bounce_buf_ctrl.base_buffer,
			     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, exp = 0;
	ena_time_t timeout_stamp;

	/* Convert timeout from resolution of 100ms to us resolution.
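	 * For example, a timeout argument of 3 becomes 3 * 100 * 1000 = 300,000 us.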
*/ 1016 timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout); 1017 1018 while (1) { 1019 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 1020 1021 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { 1022 ena_trc_err(ena_dev, "Reg read timeout occurred\n"); 1023 return ENA_COM_TIMER_EXPIRED; 1024 } 1025 1026 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == 1027 exp_state) 1028 return 0; 1029 1030 if (ENA_TIME_EXPIRE(timeout_stamp)) 1031 return ENA_COM_TIMER_EXPIRED; 1032 1033 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); 1034 } 1035 } 1036 1037 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, 1038 enum ena_admin_aq_feature_id feature_id) 1039 { 1040 u32 feature_mask = 1 << feature_id; 1041 1042 /* Device attributes is always supported */ 1043 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && 1044 !(ena_dev->supported_features & feature_mask)) 1045 return false; 1046 1047 return true; 1048 } 1049 1050 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, 1051 struct ena_admin_get_feat_resp *get_resp, 1052 enum ena_admin_aq_feature_id feature_id, 1053 dma_addr_t control_buf_dma_addr, 1054 u32 control_buff_size, 1055 u8 feature_ver) 1056 { 1057 struct ena_com_admin_queue *admin_queue; 1058 struct ena_admin_get_feat_cmd get_cmd; 1059 int ret; 1060 1061 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { 1062 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id); 1063 return ENA_COM_UNSUPPORTED; 1064 } 1065 1066 memset(&get_cmd, 0x0, sizeof(get_cmd)); 1067 admin_queue = &ena_dev->admin_queue; 1068 1069 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; 1070 1071 if (control_buff_size) 1072 get_cmd.aq_common_descriptor.flags = 1073 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 1074 else 1075 get_cmd.aq_common_descriptor.flags = 0; 1076 1077 ret = ena_com_mem_addr_set(ena_dev, 1078 &get_cmd.control_buffer.address, 1079 control_buf_dma_addr); 1080 if (unlikely(ret)) { 1081 ena_trc_err(ena_dev, "Memory address set failed\n"); 1082 return ret; 1083 } 1084 1085 get_cmd.control_buffer.length = control_buff_size; 1086 get_cmd.feat_common.feature_version = feature_ver; 1087 get_cmd.feat_common.feature_id = feature_id; 1088 1089 ret = ena_com_execute_admin_command(admin_queue, 1090 (struct ena_admin_aq_entry *) 1091 &get_cmd, 1092 sizeof(get_cmd), 1093 (struct ena_admin_acq_entry *) 1094 get_resp, 1095 sizeof(*get_resp)); 1096 1097 if (unlikely(ret)) 1098 ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n", 1099 feature_id, ret); 1100 1101 return ret; 1102 } 1103 1104 static int ena_com_get_feature(struct ena_com_dev *ena_dev, 1105 struct ena_admin_get_feat_resp *get_resp, 1106 enum ena_admin_aq_feature_id feature_id, 1107 u8 feature_ver) 1108 { 1109 return ena_com_get_feature_ex(ena_dev, 1110 get_resp, 1111 feature_id, 1112 0, 1113 0, 1114 feature_ver); 1115 } 1116 1117 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev) 1118 { 1119 return ena_dev->rss.hash_func; 1120 } 1121 1122 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) 1123 { 1124 struct ena_admin_feature_rss_flow_hash_control *hash_key = 1125 (ena_dev->rss).hash_key; 1126 1127 ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key)); 1128 /* The key buffer is stored in the device in an array of 1129 * uint32 elements. 
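	 * key_parts below therefore counts 32-bit words rather than bytes.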
1130 */ 1131 hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS; 1132 } 1133 1134 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) 1135 { 1136 struct ena_rss *rss = &ena_dev->rss; 1137 1138 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) 1139 return ENA_COM_UNSUPPORTED; 1140 1141 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 1142 sizeof(*rss->hash_key), 1143 rss->hash_key, 1144 rss->hash_key_dma_addr, 1145 rss->hash_key_mem_handle); 1146 1147 if (unlikely(!rss->hash_key)) 1148 return ENA_COM_NO_MEM; 1149 1150 return 0; 1151 } 1152 1153 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) 1154 { 1155 struct ena_rss *rss = &ena_dev->rss; 1156 1157 if (rss->hash_key) 1158 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1159 sizeof(*rss->hash_key), 1160 rss->hash_key, 1161 rss->hash_key_dma_addr, 1162 rss->hash_key_mem_handle); 1163 rss->hash_key = NULL; 1164 } 1165 1166 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) 1167 { 1168 struct ena_rss *rss = &ena_dev->rss; 1169 1170 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 1171 sizeof(*rss->hash_ctrl), 1172 rss->hash_ctrl, 1173 rss->hash_ctrl_dma_addr, 1174 rss->hash_ctrl_mem_handle); 1175 1176 if (unlikely(!rss->hash_ctrl)) 1177 return ENA_COM_NO_MEM; 1178 1179 return 0; 1180 } 1181 1182 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) 1183 { 1184 struct ena_rss *rss = &ena_dev->rss; 1185 1186 if (rss->hash_ctrl) 1187 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1188 sizeof(*rss->hash_ctrl), 1189 rss->hash_ctrl, 1190 rss->hash_ctrl_dma_addr, 1191 rss->hash_ctrl_mem_handle); 1192 rss->hash_ctrl = NULL; 1193 } 1194 1195 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, 1196 u16 log_size) 1197 { 1198 struct ena_rss *rss = &ena_dev->rss; 1199 struct ena_admin_get_feat_resp get_resp; 1200 size_t tbl_size; 1201 int ret; 1202 1203 ret = ena_com_get_feature(ena_dev, &get_resp, 1204 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0); 1205 if (unlikely(ret)) 1206 return ret; 1207 1208 if ((get_resp.u.ind_table.min_size > log_size) || 1209 (get_resp.u.ind_table.max_size < log_size)) { 1210 ena_trc_err(ena_dev, "Indirect table size doesn't fit. 
requested size: %d while min is:%d and max %d\n", 1211 1 << log_size, 1212 1 << get_resp.u.ind_table.min_size, 1213 1 << get_resp.u.ind_table.max_size); 1214 return ENA_COM_INVAL; 1215 } 1216 1217 tbl_size = (1ULL << log_size) * 1218 sizeof(struct ena_admin_rss_ind_table_entry); 1219 1220 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 1221 tbl_size, 1222 rss->rss_ind_tbl, 1223 rss->rss_ind_tbl_dma_addr, 1224 rss->rss_ind_tbl_mem_handle); 1225 if (unlikely(!rss->rss_ind_tbl)) 1226 goto mem_err1; 1227 1228 tbl_size = (1ULL << log_size) * sizeof(u16); 1229 rss->host_rss_ind_tbl = 1230 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size); 1231 if (unlikely(!rss->host_rss_ind_tbl)) 1232 goto mem_err2; 1233 1234 rss->tbl_log_size = log_size; 1235 1236 return 0; 1237 1238 mem_err2: 1239 tbl_size = (1ULL << log_size) * 1240 sizeof(struct ena_admin_rss_ind_table_entry); 1241 1242 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1243 tbl_size, 1244 rss->rss_ind_tbl, 1245 rss->rss_ind_tbl_dma_addr, 1246 rss->rss_ind_tbl_mem_handle); 1247 rss->rss_ind_tbl = NULL; 1248 mem_err1: 1249 rss->tbl_log_size = 0; 1250 return ENA_COM_NO_MEM; 1251 } 1252 1253 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) 1254 { 1255 struct ena_rss *rss = &ena_dev->rss; 1256 size_t tbl_size = (1ULL << rss->tbl_log_size) * 1257 sizeof(struct ena_admin_rss_ind_table_entry); 1258 1259 if (rss->rss_ind_tbl) 1260 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1261 tbl_size, 1262 rss->rss_ind_tbl, 1263 rss->rss_ind_tbl_dma_addr, 1264 rss->rss_ind_tbl_mem_handle); 1265 rss->rss_ind_tbl = NULL; 1266 1267 if (rss->host_rss_ind_tbl) 1268 ENA_MEM_FREE(ena_dev->dmadev, 1269 rss->host_rss_ind_tbl, 1270 ((1ULL << rss->tbl_log_size) * sizeof(u16))); 1271 rss->host_rss_ind_tbl = NULL; 1272 } 1273 1274 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, 1275 struct ena_com_io_sq *io_sq, u16 cq_idx) 1276 { 1277 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1278 struct ena_admin_aq_create_sq_cmd create_cmd; 1279 struct ena_admin_acq_create_sq_resp_desc cmd_completion; 1280 u8 direction; 1281 int ret; 1282 1283 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1284 1285 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; 1286 1287 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1288 direction = ENA_ADMIN_SQ_DIRECTION_TX; 1289 else 1290 direction = ENA_ADMIN_SQ_DIRECTION_RX; 1291 1292 create_cmd.sq_identity |= (direction << 1293 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & 1294 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; 1295 1296 create_cmd.sq_caps_2 |= io_sq->mem_queue_type & 1297 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; 1298 1299 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << 1300 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & 1301 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; 1302 1303 create_cmd.sq_caps_3 |= 1304 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; 1305 1306 create_cmd.cq_idx = cq_idx; 1307 create_cmd.sq_depth = io_sq->q_depth; 1308 1309 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 1310 ret = ena_com_mem_addr_set(ena_dev, 1311 &create_cmd.sq_ba, 1312 io_sq->desc_addr.phys_addr); 1313 if (unlikely(ret)) { 1314 ena_trc_err(ena_dev, "Memory address set failed\n"); 1315 return ret; 1316 } 1317 } 1318 1319 ret = ena_com_execute_admin_command(admin_queue, 1320 (struct ena_admin_aq_entry *)&create_cmd, 1321 sizeof(create_cmd), 1322 (struct ena_admin_acq_entry *)&cmd_completion, 1323 sizeof(cmd_completion)); 1324 if (unlikely(ret)) { 1325 
ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret); 1326 return ret; 1327 } 1328 1329 io_sq->idx = cmd_completion.sq_idx; 1330 1331 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1332 (uintptr_t)cmd_completion.sq_doorbell_offset); 1333 1334 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1335 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar 1336 + cmd_completion.llq_headers_offset); 1337 1338 io_sq->desc_addr.pbuf_dev_addr = 1339 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + 1340 cmd_completion.llq_descriptors_offset); 1341 } 1342 1343 ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); 1344 1345 return ret; 1346 } 1347 1348 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) 1349 { 1350 struct ena_rss *rss = &ena_dev->rss; 1351 struct ena_com_io_sq *io_sq; 1352 u16 qid; 1353 int i; 1354 1355 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1356 qid = rss->host_rss_ind_tbl[i]; 1357 if (qid >= ENA_TOTAL_NUM_QUEUES) 1358 return ENA_COM_INVAL; 1359 1360 io_sq = &ena_dev->io_sq_queues[qid]; 1361 1362 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) 1363 return ENA_COM_INVAL; 1364 1365 rss->rss_ind_tbl[i].cq_idx = io_sq->idx; 1366 } 1367 1368 return 0; 1369 } 1370 1371 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, 1372 u16 intr_delay_resolution) 1373 { 1374 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; 1375 1376 if (unlikely(!intr_delay_resolution)) { 1377 ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n"); 1378 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; 1379 } 1380 1381 /* update Rx */ 1382 ena_dev->intr_moder_rx_interval = 1383 ena_dev->intr_moder_rx_interval * 1384 prev_intr_delay_resolution / 1385 intr_delay_resolution; 1386 1387 /* update Tx */ 1388 ena_dev->intr_moder_tx_interval = 1389 ena_dev->intr_moder_tx_interval * 1390 prev_intr_delay_resolution / 1391 intr_delay_resolution; 1392 1393 ena_dev->intr_delay_resolution = intr_delay_resolution; 1394 } 1395 1396 /*****************************************************************************/ 1397 /******************************* API ******************************/ 1398 /*****************************************************************************/ 1399 1400 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, 1401 struct ena_admin_aq_entry *cmd, 1402 size_t cmd_size, 1403 struct ena_admin_acq_entry *comp, 1404 size_t comp_size) 1405 { 1406 struct ena_comp_ctx *comp_ctx; 1407 int ret; 1408 1409 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, 1410 comp, comp_size); 1411 if (IS_ERR(comp_ctx)) { 1412 ret = PTR_ERR(comp_ctx); 1413 if (ret == ENA_COM_NO_DEVICE) 1414 ena_trc_dbg(admin_queue->ena_dev, 1415 "Failed to submit command [%d]\n", 1416 ret); 1417 else 1418 ena_trc_err(admin_queue->ena_dev, 1419 "Failed to submit command [%d]\n", 1420 ret); 1421 1422 return ret; 1423 } 1424 1425 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); 1426 if (unlikely(ret)) { 1427 if (admin_queue->running_state) 1428 ena_trc_err(admin_queue->ena_dev, 1429 "Failed to process command. ret = %d\n", ret); 1430 else 1431 ena_trc_dbg(admin_queue->ena_dev, 1432 "Failed to process command. 
ret = %d\n", ret); 1433 } 1434 return ret; 1435 } 1436 1437 int ena_com_create_io_cq(struct ena_com_dev *ena_dev, 1438 struct ena_com_io_cq *io_cq) 1439 { 1440 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1441 struct ena_admin_aq_create_cq_cmd create_cmd; 1442 struct ena_admin_acq_create_cq_resp_desc cmd_completion; 1443 int ret; 1444 1445 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1446 1447 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; 1448 1449 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & 1450 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; 1451 create_cmd.cq_caps_1 |= 1452 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; 1453 1454 create_cmd.msix_vector = io_cq->msix_vector; 1455 create_cmd.cq_depth = io_cq->q_depth; 1456 1457 ret = ena_com_mem_addr_set(ena_dev, 1458 &create_cmd.cq_ba, 1459 io_cq->cdesc_addr.phys_addr); 1460 if (unlikely(ret)) { 1461 ena_trc_err(ena_dev, "Memory address set failed\n"); 1462 return ret; 1463 } 1464 1465 ret = ena_com_execute_admin_command(admin_queue, 1466 (struct ena_admin_aq_entry *)&create_cmd, 1467 sizeof(create_cmd), 1468 (struct ena_admin_acq_entry *)&cmd_completion, 1469 sizeof(cmd_completion)); 1470 if (unlikely(ret)) { 1471 ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret); 1472 return ret; 1473 } 1474 1475 io_cq->idx = cmd_completion.cq_idx; 1476 1477 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1478 cmd_completion.cq_interrupt_unmask_register_offset); 1479 1480 if (cmd_completion.cq_head_db_register_offset) 1481 io_cq->cq_head_db_reg = 1482 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1483 cmd_completion.cq_head_db_register_offset); 1484 1485 if (cmd_completion.numa_node_register_offset) 1486 io_cq->numa_node_cfg_reg = 1487 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1488 cmd_completion.numa_node_register_offset); 1489 1490 ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); 1491 1492 return ret; 1493 } 1494 1495 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 1496 struct ena_com_io_sq **io_sq, 1497 struct ena_com_io_cq **io_cq) 1498 { 1499 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1500 ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n", 1501 qid, ENA_TOTAL_NUM_QUEUES); 1502 return ENA_COM_INVAL; 1503 } 1504 1505 *io_sq = &ena_dev->io_sq_queues[qid]; 1506 *io_cq = &ena_dev->io_cq_queues[qid]; 1507 1508 return 0; 1509 } 1510 1511 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) 1512 { 1513 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1514 struct ena_comp_ctx *comp_ctx; 1515 u16 i; 1516 1517 if (!admin_queue->comp_ctx) 1518 return; 1519 1520 for (i = 0; i < admin_queue->q_depth; i++) { 1521 comp_ctx = get_comp_ctxt(admin_queue, i, false); 1522 if (unlikely(!comp_ctx)) 1523 break; 1524 1525 comp_ctx->status = ENA_CMD_ABORTED; 1526 1527 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); 1528 } 1529 } 1530 1531 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) 1532 { 1533 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1534 unsigned long flags = 0; 1535 u32 exp = 0; 1536 1537 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1538 while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) { 1539 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1540 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); 1541 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1542 } 1543 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, 
flags); 1544 } 1545 1546 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, 1547 struct ena_com_io_cq *io_cq) 1548 { 1549 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1550 struct ena_admin_aq_destroy_cq_cmd destroy_cmd; 1551 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; 1552 int ret; 1553 1554 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); 1555 1556 destroy_cmd.cq_idx = io_cq->idx; 1557 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; 1558 1559 ret = ena_com_execute_admin_command(admin_queue, 1560 (struct ena_admin_aq_entry *)&destroy_cmd, 1561 sizeof(destroy_cmd), 1562 (struct ena_admin_acq_entry *)&destroy_resp, 1563 sizeof(destroy_resp)); 1564 1565 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) 1566 ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret); 1567 1568 return ret; 1569 } 1570 1571 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) 1572 { 1573 return ena_dev->admin_queue.running_state; 1574 } 1575 1576 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) 1577 { 1578 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1579 unsigned long flags = 0; 1580 1581 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags); 1582 ena_dev->admin_queue.running_state = state; 1583 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); 1584 } 1585 1586 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) 1587 { 1588 u16 depth = ena_dev->aenq.q_depth; 1589 1590 ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n"); 1591 1592 /* Init head_db to mark that all entries in the queue 1593 * are initially available 1594 */ 1595 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1596 } 1597 1598 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) 1599 { 1600 struct ena_com_admin_queue *admin_queue; 1601 struct ena_admin_set_feat_cmd cmd; 1602 struct ena_admin_set_feat_resp resp; 1603 struct ena_admin_get_feat_resp get_resp; 1604 int ret; 1605 1606 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0); 1607 if (ret) { 1608 ena_trc_info(ena_dev, "Can't get aenq configuration\n"); 1609 return ret; 1610 } 1611 1612 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { 1613 ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. 
supported flag: 0x%x asked flag: 0x%x\n", 1614 get_resp.u.aenq.supported_groups, 1615 groups_flag); 1616 return ENA_COM_UNSUPPORTED; 1617 } 1618 1619 memset(&cmd, 0x0, sizeof(cmd)); 1620 admin_queue = &ena_dev->admin_queue; 1621 1622 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 1623 cmd.aq_common_descriptor.flags = 0; 1624 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; 1625 cmd.u.aenq.enabled_groups = groups_flag; 1626 1627 ret = ena_com_execute_admin_command(admin_queue, 1628 (struct ena_admin_aq_entry *)&cmd, 1629 sizeof(cmd), 1630 (struct ena_admin_acq_entry *)&resp, 1631 sizeof(resp)); 1632 1633 if (unlikely(ret)) 1634 ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret); 1635 1636 return ret; 1637 } 1638 1639 int ena_com_get_dma_width(struct ena_com_dev *ena_dev) 1640 { 1641 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 1642 u32 width; 1643 1644 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { 1645 ena_trc_err(ena_dev, "Reg read timeout occurred\n"); 1646 return ENA_COM_TIMER_EXPIRED; 1647 } 1648 1649 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> 1650 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; 1651 1652 ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width); 1653 1654 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { 1655 ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width); 1656 return ENA_COM_INVAL; 1657 } 1658 1659 ena_dev->dma_addr_bits = width; 1660 1661 return width; 1662 } 1663 1664 int ena_com_validate_version(struct ena_com_dev *ena_dev) 1665 { 1666 u32 ver; 1667 u32 ctrl_ver; 1668 u32 ctrl_ver_masked; 1669 1670 /* Make sure the ENA version and the controller version are at least 1671 * as the driver expects 1672 */ 1673 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); 1674 ctrl_ver = ena_com_reg_bar_read32(ena_dev, 1675 ENA_REGS_CONTROLLER_VERSION_OFF); 1676 1677 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || 1678 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { 1679 ena_trc_err(ena_dev, "Reg read timeout occurred\n"); 1680 return ENA_COM_TIMER_EXPIRED; 1681 } 1682 1683 ena_trc_info(ena_dev, "ENA device version: %d.%d\n", 1684 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> 1685 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, 1686 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); 1687 1688 ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n", 1689 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) 1690 >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, 1691 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) 1692 >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, 1693 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), 1694 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> 1695 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); 1696 1697 ctrl_ver_masked = 1698 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | 1699 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | 1700 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); 1701 1702 /* Validate the ctrl version without the implementation ID */ 1703 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { 1704 ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); 1705 return -1; 1706 } 1707 1708 return 0; 1709 } 1710 1711 static void 1712 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev, 1713 struct ena_com_admin_queue *admin_queue) 1714 1715 { 1716 if (!admin_queue->comp_ctx) 1717 return; 1718 1719 ENA_WAIT_EVENTS_DESTROY(admin_queue); 1720 
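	/* Free the completion context array allocated in ena_com_init_comp_ctxt() */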
ENA_MEM_FREE(ena_dev->dmadev, 1721 admin_queue->comp_ctx, 1722 (admin_queue->q_depth * sizeof(struct ena_comp_ctx))); 1723 1724 admin_queue->comp_ctx = NULL; 1725 } 1726 1727 void ena_com_admin_destroy(struct ena_com_dev *ena_dev) 1728 { 1729 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1730 struct ena_com_admin_cq *cq = &admin_queue->cq; 1731 struct ena_com_admin_sq *sq = &admin_queue->sq; 1732 struct ena_com_aenq *aenq = &ena_dev->aenq; 1733 u16 size; 1734 1735 ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue); 1736 1737 size = ADMIN_SQ_SIZE(admin_queue->q_depth); 1738 if (sq->entries) 1739 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries, 1740 sq->dma_addr, sq->mem_handle); 1741 sq->entries = NULL; 1742 1743 size = ADMIN_CQ_SIZE(admin_queue->q_depth); 1744 if (cq->entries) 1745 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries, 1746 cq->dma_addr, cq->mem_handle); 1747 cq->entries = NULL; 1748 1749 size = ADMIN_AENQ_SIZE(aenq->q_depth); 1750 if (ena_dev->aenq.entries) 1751 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries, 1752 aenq->dma_addr, aenq->mem_handle); 1753 aenq->entries = NULL; 1754 ENA_SPINLOCK_DESTROY(admin_queue->q_lock); 1755 } 1756 1757 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) 1758 { 1759 u32 mask_value = 0; 1760 1761 if (polling) 1762 mask_value = ENA_REGS_ADMIN_INTR_MASK; 1763 1764 ENA_REG_WRITE32(ena_dev->bus, mask_value, 1765 ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); 1766 ena_dev->admin_queue.polling = polling; 1767 } 1768 1769 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev) 1770 { 1771 return ena_dev->admin_queue.polling; 1772 } 1773 1774 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, 1775 bool polling) 1776 { 1777 ena_dev->admin_queue.auto_polling = polling; 1778 } 1779 1780 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) 1781 { 1782 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1783 1784 ENA_SPINLOCK_INIT(mmio_read->lock); 1785 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 1786 sizeof(*mmio_read->read_resp), 1787 mmio_read->read_resp, 1788 mmio_read->read_resp_dma_addr, 1789 mmio_read->read_resp_mem_handle); 1790 if (unlikely(!mmio_read->read_resp)) 1791 goto err; 1792 1793 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 1794 1795 mmio_read->read_resp->req_id = 0x0; 1796 mmio_read->seq_num = 0x0; 1797 mmio_read->readless_supported = true; 1798 1799 return 0; 1800 1801 err: 1802 ENA_SPINLOCK_DESTROY(mmio_read->lock); 1803 return ENA_COM_NO_MEM; 1804 } 1805 1806 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) 1807 { 1808 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1809 1810 mmio_read->readless_supported = readless_supported; 1811 } 1812 1813 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) 1814 { 1815 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1816 1817 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1818 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1819 1820 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 1821 sizeof(*mmio_read->read_resp), 1822 mmio_read->read_resp, 1823 mmio_read->read_resp_dma_addr, 1824 mmio_read->read_resp_mem_handle); 1825 1826 mmio_read->read_resp = NULL; 1827 ENA_SPINLOCK_DESTROY(mmio_read->lock); 1828 } 1829 1830 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) 1831 { 1832 struct 
ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1833 u32 addr_low, addr_high; 1834 1835 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); 1836 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); 1837 1838 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1839 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1840 } 1841 1842 int ena_com_admin_init(struct ena_com_dev *ena_dev, 1843 struct ena_aenq_handlers *aenq_handlers) 1844 { 1845 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1846 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; 1847 int ret; 1848 1849 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 1850 1851 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { 1852 ena_trc_err(ena_dev, "Reg read timeout occurred\n"); 1853 return ENA_COM_TIMER_EXPIRED; 1854 } 1855 1856 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { 1857 ena_trc_err(ena_dev, "Device isn't ready, abort com init\n"); 1858 return ENA_COM_NO_DEVICE; 1859 } 1860 1861 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; 1862 1863 admin_queue->bus = ena_dev->bus; 1864 admin_queue->q_dmadev = ena_dev->dmadev; 1865 admin_queue->polling = false; 1866 admin_queue->curr_cmd_id = 0; 1867 1868 ATOMIC32_SET(&admin_queue->outstanding_cmds, 0); 1869 1870 ENA_SPINLOCK_INIT(admin_queue->q_lock); 1871 1872 ret = ena_com_init_comp_ctxt(admin_queue); 1873 if (ret) 1874 goto error; 1875 1876 ret = ena_com_admin_init_sq(admin_queue); 1877 if (ret) 1878 goto error; 1879 1880 ret = ena_com_admin_init_cq(admin_queue); 1881 if (ret) 1882 goto error; 1883 1884 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1885 ENA_REGS_AQ_DB_OFF); 1886 1887 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); 1888 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); 1889 1890 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); 1891 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); 1892 1893 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); 1894 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); 1895 1896 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); 1897 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); 1898 1899 aq_caps = 0; 1900 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; 1901 aq_caps |= (sizeof(struct ena_admin_aq_entry) << 1902 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & 1903 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; 1904 1905 acq_caps = 0; 1906 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; 1907 acq_caps |= (sizeof(struct ena_admin_acq_entry) << 1908 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & 1909 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; 1910 1911 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); 1912 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); 1913 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); 1914 if (ret) 1915 goto error; 1916 1917 admin_queue->ena_dev = ena_dev; 1918 admin_queue->running_state = true; 1919 1920 return 0; 1921 error: 1922 ena_com_admin_destroy(ena_dev); 1923 1924 return ret; 1925 } 1926 1927 int ena_com_create_io_queue(struct ena_com_dev *ena_dev, 1928 struct ena_com_create_io_ctx *ctx) 1929 { 1930 struct ena_com_io_sq *io_sq; 1931 struct ena_com_io_cq *io_cq; 
1932 int ret; 1933 1934 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { 1935 ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n", 1936 ctx->qid, ENA_TOTAL_NUM_QUEUES); 1937 return ENA_COM_INVAL; 1938 } 1939 1940 io_sq = &ena_dev->io_sq_queues[ctx->qid]; 1941 io_cq = &ena_dev->io_cq_queues[ctx->qid]; 1942 1943 memset(io_sq, 0x0, sizeof(*io_sq)); 1944 memset(io_cq, 0x0, sizeof(*io_cq)); 1945 1946 /* Init CQ */ 1947 io_cq->q_depth = ctx->queue_size; 1948 io_cq->direction = ctx->direction; 1949 io_cq->qid = ctx->qid; 1950 1951 io_cq->msix_vector = ctx->msix_vector; 1952 1953 io_sq->q_depth = ctx->queue_size; 1954 io_sq->direction = ctx->direction; 1955 io_sq->qid = ctx->qid; 1956 1957 io_sq->mem_queue_type = ctx->mem_queue_type; 1958 1959 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1960 /* header length is limited to 8 bits */ 1961 io_sq->tx_max_header_size = 1962 ENA_MIN32(ena_dev->tx_max_header_size, SZ_256); 1963 1964 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); 1965 if (ret) 1966 goto error; 1967 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); 1968 if (ret) 1969 goto error; 1970 1971 ret = ena_com_create_io_cq(ena_dev, io_cq); 1972 if (ret) 1973 goto error; 1974 1975 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); 1976 if (ret) 1977 goto destroy_io_cq; 1978 1979 return 0; 1980 1981 destroy_io_cq: 1982 ena_com_destroy_io_cq(ena_dev, io_cq); 1983 error: 1984 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 1985 return ret; 1986 } 1987 1988 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) 1989 { 1990 struct ena_com_io_sq *io_sq; 1991 struct ena_com_io_cq *io_cq; 1992 1993 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1994 ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n", 1995 qid, ENA_TOTAL_NUM_QUEUES); 1996 return; 1997 } 1998 1999 io_sq = &ena_dev->io_sq_queues[qid]; 2000 io_cq = &ena_dev->io_cq_queues[qid]; 2001 2002 ena_com_destroy_io_sq(ena_dev, io_sq); 2003 ena_com_destroy_io_cq(ena_dev, io_cq); 2004 2005 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 2006 } 2007 2008 int ena_com_get_link_params(struct ena_com_dev *ena_dev, 2009 struct ena_admin_get_feat_resp *resp) 2010 { 2011 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0); 2012 } 2013 2014 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, 2015 struct ena_com_dev_get_features_ctx *get_feat_ctx) 2016 { 2017 struct ena_admin_get_feat_resp get_resp; 2018 int rc; 2019 2020 rc = ena_com_get_feature(ena_dev, &get_resp, 2021 ENA_ADMIN_DEVICE_ATTRIBUTES, 0); 2022 if (rc) 2023 return rc; 2024 2025 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, 2026 sizeof(get_resp.u.dev_attr)); 2027 2028 ena_dev->supported_features = get_resp.u.dev_attr.supported_features; 2029 2030 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 2031 rc = ena_com_get_feature(ena_dev, &get_resp, 2032 ENA_ADMIN_MAX_QUEUES_EXT, 2033 ENA_FEATURE_MAX_QUEUE_EXT_VER); 2034 if (rc) 2035 return rc; 2036 2037 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER) 2038 return ENA_COM_INVAL; 2039 2040 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext, 2041 sizeof(get_resp.u.max_queue_ext)); 2042 ena_dev->tx_max_header_size = 2043 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size; 2044 } else { 2045 rc = ena_com_get_feature(ena_dev, &get_resp, 2046 ENA_ADMIN_MAX_QUEUES_NUM, 0); 2047 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, 2048 sizeof(get_resp.u.max_queue)); 2049 ena_dev->tx_max_header_size = 2050 
get_resp.u.max_queue.max_header_size; 2051 2052 if (rc) 2053 return rc; 2054 } 2055 2056 rc = ena_com_get_feature(ena_dev, &get_resp, 2057 ENA_ADMIN_AENQ_CONFIG, 0); 2058 if (rc) 2059 return rc; 2060 2061 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, 2062 sizeof(get_resp.u.aenq)); 2063 2064 rc = ena_com_get_feature(ena_dev, &get_resp, 2065 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); 2066 if (rc) 2067 return rc; 2068 2069 memcpy(&get_feat_ctx->offload, &get_resp.u.offload, 2070 sizeof(get_resp.u.offload)); 2071 2072 /* Driver hints isn't mandatory admin command. So in case the 2073 * command isn't supported set driver hints to 0 2074 */ 2075 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0); 2076 2077 if (!rc) 2078 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, 2079 sizeof(get_resp.u.hw_hints)); 2080 else if (rc == ENA_COM_UNSUPPORTED) 2081 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints)); 2082 else 2083 return rc; 2084 2085 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0); 2086 if (!rc) 2087 memcpy(&get_feat_ctx->llq, &get_resp.u.llq, 2088 sizeof(get_resp.u.llq)); 2089 else if (rc == ENA_COM_UNSUPPORTED) 2090 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); 2091 else 2092 return rc; 2093 2094 return 0; 2095 } 2096 2097 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) 2098 { 2099 ena_com_handle_admin_completion(&ena_dev->admin_queue); 2100 } 2101 2102 /* ena_handle_specific_aenq_event: 2103 * return the handler that is relevant to the specific event group 2104 */ 2105 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev, 2106 u16 group) 2107 { 2108 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers; 2109 2110 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) 2111 return aenq_handlers->handlers[group]; 2112 2113 return aenq_handlers->unimplemented_handler; 2114 } 2115 2116 /* ena_aenq_intr_handler: 2117 * handles the aenq incoming events. 2118 * pop events from the queue and apply the specific handler 2119 */ 2120 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) 2121 { 2122 struct ena_admin_aenq_entry *aenq_e; 2123 struct ena_admin_aenq_common_desc *aenq_common; 2124 struct ena_com_aenq *aenq = &ena_dev->aenq; 2125 u64 timestamp; 2126 ena_aenq_handler handler_cb; 2127 u16 masked_head, processed = 0; 2128 u8 phase; 2129 2130 masked_head = aenq->head & (aenq->q_depth - 1); 2131 phase = aenq->phase; 2132 aenq_e = &aenq->entries[masked_head]; /* Get first entry */ 2133 aenq_common = &aenq_e->aenq_common_desc; 2134 2135 /* Go over all the events */ 2136 while ((READ_ONCE8(aenq_common->flags) & 2137 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { 2138 /* Make sure the phase bit (ownership) is as expected before 2139 * reading the rest of the descriptor. 2140 */ 2141 dma_rmb(); 2142 2143 timestamp = (u64)aenq_common->timestamp_low | 2144 ((u64)aenq_common->timestamp_high << 32); 2145 2146 ena_trc_dbg(ena_dev, "AENQ! 
Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n", 2147 aenq_common->group, 2148 aenq_common->syndrome, 2149 timestamp); 2150 2151 /* Handle specific event*/ 2152 handler_cb = ena_com_get_specific_aenq_cb(ena_dev, 2153 aenq_common->group); 2154 handler_cb(data, aenq_e); /* call the actual event handler*/ 2155 2156 /* Get next event entry */ 2157 masked_head++; 2158 processed++; 2159 2160 if (unlikely(masked_head == aenq->q_depth)) { 2161 masked_head = 0; 2162 phase = !phase; 2163 } 2164 aenq_e = &aenq->entries[masked_head]; 2165 aenq_common = &aenq_e->aenq_common_desc; 2166 } 2167 2168 aenq->head += processed; 2169 aenq->phase = phase; 2170 2171 /* Don't update aenq doorbell if there weren't any processed events */ 2172 if (!processed) 2173 return; 2174 2175 /* write the aenq doorbell after all AENQ descriptors were read */ 2176 mb(); 2177 ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head, 2178 ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 2179 mmiowb(); 2180 } 2181 #ifdef ENA_EXTENDED_STATS 2182 /* 2183 * Sets the function Idx and Queue Idx to be used for 2184 * get full statistics feature 2185 * 2186 */ 2187 int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev, 2188 u32 func_queue) 2189 { 2190 2191 /* Function & Queue is acquired from user in the following format : 2192 * Bottom Half word: funct 2193 * Top Half Word: queue 2194 */ 2195 ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue); 2196 ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue); 2197 2198 return 0; 2199 } 2200 2201 #endif /* ENA_EXTENDED_STATS */ 2202 2203 int ena_com_dev_reset(struct ena_com_dev *ena_dev, 2204 enum ena_regs_reset_reason_types reset_reason) 2205 { 2206 u32 stat, timeout, cap, reset_val; 2207 int rc; 2208 2209 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 2210 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 2211 2212 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || 2213 (cap == ENA_MMIO_READ_TIMEOUT))) { 2214 ena_trc_err(ena_dev, "Reg read32 timeout occurred\n"); 2215 return ENA_COM_TIMER_EXPIRED; 2216 } 2217 2218 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { 2219 ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n"); 2220 return ENA_COM_INVAL; 2221 } 2222 2223 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> 2224 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; 2225 if (timeout == 0) { 2226 ena_trc_err(ena_dev, "Invalid timeout value\n"); 2227 return ENA_COM_INVAL; 2228 } 2229 2230 /* start reset */ 2231 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; 2232 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) & 2233 ENA_REGS_DEV_CTL_RESET_REASON_MASK; 2234 ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 2235 2236 /* Write again the MMIO read request address */ 2237 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 2238 2239 rc = wait_for_reset_state(ena_dev, timeout, 2240 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); 2241 if (rc != 0) { 2242 ena_trc_err(ena_dev, "Reset indication didn't turn on\n"); 2243 return rc; 2244 } 2245 2246 /* reset done */ 2247 ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 2248 rc = wait_for_reset_state(ena_dev, timeout, 0); 2249 if (rc != 0) { 2250 ena_trc_err(ena_dev, "Reset indication didn't turn off\n"); 2251 return rc; 2252 } 2253 2254 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> 2255 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; 2256 if (timeout) 2257 /* the resolution of timeout reg is 100ms */ 2258 
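/* completion_timeout is tracked in microseconds, so each 100 ms unit reported by the device becomes 100000 us. */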
ena_dev->admin_queue.completion_timeout = timeout * 100000; 2259 else 2260 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; 2261 2262 return 0; 2263 } 2264 2265 static int ena_get_dev_stats(struct ena_com_dev *ena_dev, 2266 struct ena_com_stats_ctx *ctx, 2267 enum ena_admin_get_stats_type type) 2268 { 2269 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; 2270 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; 2271 struct ena_com_admin_queue *admin_queue; 2272 int ret; 2273 2274 admin_queue = &ena_dev->admin_queue; 2275 2276 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; 2277 get_cmd->aq_common_descriptor.flags = 0; 2278 get_cmd->type = type; 2279 2280 ret = ena_com_execute_admin_command(admin_queue, 2281 (struct ena_admin_aq_entry *)get_cmd, 2282 sizeof(*get_cmd), 2283 (struct ena_admin_acq_entry *)get_resp, 2284 sizeof(*get_resp)); 2285 2286 if (unlikely(ret)) 2287 ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret); 2288 2289 return ret; 2290 } 2291 2292 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, 2293 struct ena_admin_eni_stats *stats) 2294 { 2295 struct ena_com_stats_ctx ctx; 2296 int ret; 2297 2298 memset(&ctx, 0x0, sizeof(ctx)); 2299 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI); 2300 if (likely(ret == 0)) 2301 memcpy(stats, &ctx.get_resp.u.eni_stats, 2302 sizeof(ctx.get_resp.u.eni_stats)); 2303 2304 return ret; 2305 } 2306 2307 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, 2308 struct ena_admin_basic_stats *stats) 2309 { 2310 struct ena_com_stats_ctx ctx; 2311 int ret; 2312 2313 memset(&ctx, 0x0, sizeof(ctx)); 2314 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); 2315 if (likely(ret == 0)) 2316 memcpy(stats, &ctx.get_resp.u.basic_stats, 2317 sizeof(ctx.get_resp.u.basic_stats)); 2318 2319 return ret; 2320 } 2321 #ifdef ENA_EXTENDED_STATS 2322 2323 int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff, 2324 u32 len) 2325 { 2326 struct ena_com_stats_ctx ctx; 2327 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd; 2328 ena_mem_handle_t mem_handle; 2329 void *virt_addr; 2330 dma_addr_t phys_addr; 2331 int ret; 2332 2333 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len, 2334 virt_addr, phys_addr, mem_handle); 2335 if (!virt_addr) { 2336 ret = ENA_COM_NO_MEM; 2337 goto done; 2338 } 2339 memset(&ctx, 0x0, sizeof(ctx)); 2340 ret = ena_com_mem_addr_set(ena_dev, 2341 &get_cmd->u.control_buffer.address, 2342 phys_addr); 2343 if (unlikely(ret)) { 2344 ena_trc_err(ena_dev, "Memory address set failed\n"); 2345 goto free_ext_stats_mem; 2346 } 2347 get_cmd->u.control_buffer.length = len; 2348 2349 get_cmd->device_id = ena_dev->stats_func; 2350 get_cmd->queue_idx = ena_dev->stats_queue; 2351 2352 ret = ena_get_dev_stats(ena_dev, &ctx, 2353 ENA_ADMIN_GET_STATS_TYPE_EXTENDED); 2354 if (ret < 0) 2355 goto free_ext_stats_mem; 2356 2357 ret = snprintf(buff, len, "%s", (char *)virt_addr); 2358 2359 free_ext_stats_mem: 2360 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr, 2361 mem_handle); 2362 done: 2363 return ret; 2364 } 2365 #endif 2366 2367 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu) 2368 { 2369 struct ena_com_admin_queue *admin_queue; 2370 struct ena_admin_set_feat_cmd cmd; 2371 struct ena_admin_set_feat_resp resp; 2372 int ret; 2373 2374 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { 2375 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU); 2376 return ENA_COM_UNSUPPORTED; 
2377 } 2378 2379 memset(&cmd, 0x0, sizeof(cmd)); 2380 admin_queue = &ena_dev->admin_queue; 2381 2382 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2383 cmd.aq_common_descriptor.flags = 0; 2384 cmd.feat_common.feature_id = ENA_ADMIN_MTU; 2385 cmd.u.mtu.mtu = mtu; 2386 2387 ret = ena_com_execute_admin_command(admin_queue, 2388 (struct ena_admin_aq_entry *)&cmd, 2389 sizeof(cmd), 2390 (struct ena_admin_acq_entry *)&resp, 2391 sizeof(resp)); 2392 2393 if (unlikely(ret)) 2394 ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret); 2395 2396 return ret; 2397 } 2398 2399 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, 2400 struct ena_admin_feature_offload_desc *offload) 2401 { 2402 int ret; 2403 struct ena_admin_get_feat_resp resp; 2404 2405 ret = ena_com_get_feature(ena_dev, &resp, 2406 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); 2407 if (unlikely(ret)) { 2408 ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret); 2409 return ret; 2410 } 2411 2412 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); 2413 2414 return 0; 2415 } 2416 2417 int ena_com_set_hash_function(struct ena_com_dev *ena_dev) 2418 { 2419 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2420 struct ena_rss *rss = &ena_dev->rss; 2421 struct ena_admin_set_feat_cmd cmd; 2422 struct ena_admin_set_feat_resp resp; 2423 struct ena_admin_get_feat_resp get_resp; 2424 int ret; 2425 2426 if (!ena_com_check_supported_feature_id(ena_dev, 2427 ENA_ADMIN_RSS_HASH_FUNCTION)) { 2428 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", 2429 ENA_ADMIN_RSS_HASH_FUNCTION); 2430 return ENA_COM_UNSUPPORTED; 2431 } 2432 2433 /* Validate hash function is supported */ 2434 ret = ena_com_get_feature(ena_dev, &get_resp, 2435 ENA_ADMIN_RSS_HASH_FUNCTION, 0); 2436 if (unlikely(ret)) 2437 return ret; 2438 2439 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { 2440 ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n", 2441 rss->hash_func); 2442 return ENA_COM_UNSUPPORTED; 2443 } 2444 2445 memset(&cmd, 0x0, sizeof(cmd)); 2446 2447 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2448 cmd.aq_common_descriptor.flags = 2449 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2450 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; 2451 cmd.u.flow_hash_func.init_val = rss->hash_init_val; 2452 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; 2453 2454 ret = ena_com_mem_addr_set(ena_dev, 2455 &cmd.control_buffer.address, 2456 rss->hash_key_dma_addr); 2457 if (unlikely(ret)) { 2458 ena_trc_err(ena_dev, "Memory address set failed\n"); 2459 return ret; 2460 } 2461 2462 cmd.control_buffer.length = sizeof(*rss->hash_key); 2463 2464 ret = ena_com_execute_admin_command(admin_queue, 2465 (struct ena_admin_aq_entry *)&cmd, 2466 sizeof(cmd), 2467 (struct ena_admin_acq_entry *)&resp, 2468 sizeof(resp)); 2469 if (unlikely(ret)) { 2470 ena_trc_err(ena_dev, "Failed to set hash function %d. 
error: %d\n", 2471 rss->hash_func, ret); 2472 return ENA_COM_INVAL; 2473 } 2474 2475 return 0; 2476 } 2477 2478 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, 2479 enum ena_admin_hash_functions func, 2480 const u8 *key, u16 key_len, u32 init_val) 2481 { 2482 struct ena_admin_feature_rss_flow_hash_control *hash_key; 2483 struct ena_admin_get_feat_resp get_resp; 2484 enum ena_admin_hash_functions old_func; 2485 struct ena_rss *rss = &ena_dev->rss; 2486 int rc; 2487 2488 hash_key = rss->hash_key; 2489 2490 /* Make sure size is a mult of DWs */ 2491 if (unlikely(key_len & 0x3)) 2492 return ENA_COM_INVAL; 2493 2494 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2495 ENA_ADMIN_RSS_HASH_FUNCTION, 2496 rss->hash_key_dma_addr, 2497 sizeof(*rss->hash_key), 0); 2498 if (unlikely(rc)) 2499 return rc; 2500 2501 if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { 2502 ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func); 2503 return ENA_COM_UNSUPPORTED; 2504 } 2505 2506 switch (func) { 2507 case ENA_ADMIN_TOEPLITZ: 2508 if (key) { 2509 if (key_len != sizeof(hash_key->key)) { 2510 ena_trc_err(ena_dev, "key len (%hu) doesn't equal the supported size (%zu)\n", 2511 key_len, sizeof(hash_key->key)); 2512 return ENA_COM_INVAL; 2513 } 2514 memcpy(hash_key->key, key, key_len); 2515 rss->hash_init_val = init_val; 2516 hash_key->key_parts = key_len / sizeof(hash_key->key[0]); 2517 } 2518 break; 2519 case ENA_ADMIN_CRC32: 2520 rss->hash_init_val = init_val; 2521 break; 2522 default: 2523 ena_trc_err(ena_dev, "Invalid hash function (%d)\n", func); 2524 return ENA_COM_INVAL; 2525 } 2526 2527 old_func = rss->hash_func; 2528 rss->hash_func = func; 2529 rc = ena_com_set_hash_function(ena_dev); 2530 2531 /* Restore the old function */ 2532 if (unlikely(rc)) 2533 rss->hash_func = old_func; 2534 2535 return rc; 2536 } 2537 2538 int ena_com_get_hash_function(struct ena_com_dev *ena_dev, 2539 enum ena_admin_hash_functions *func) 2540 { 2541 struct ena_rss *rss = &ena_dev->rss; 2542 struct ena_admin_get_feat_resp get_resp; 2543 int rc; 2544 2545 if (unlikely(!func)) 2546 return ENA_COM_INVAL; 2547 2548 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2549 ENA_ADMIN_RSS_HASH_FUNCTION, 2550 rss->hash_key_dma_addr, 2551 sizeof(*rss->hash_key), 0); 2552 if (unlikely(rc)) 2553 return rc; 2554 2555 /* ENA_FFS() returns 1 in case the lsb is set */ 2556 rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func); 2557 if (rss->hash_func) 2558 rss->hash_func--; 2559 2560 *func = rss->hash_func; 2561 2562 return 0; 2563 } 2564 2565 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key) 2566 { 2567 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2568 ena_dev->rss.hash_key; 2569 2570 if (key) 2571 memcpy(key, hash_key->key, 2572 (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0])); 2573 2574 return 0; 2575 } 2576 2577 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, 2578 enum ena_admin_flow_hash_proto proto, 2579 u16 *fields) 2580 { 2581 struct ena_rss *rss = &ena_dev->rss; 2582 struct ena_admin_get_feat_resp get_resp; 2583 int rc; 2584 2585 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2586 ENA_ADMIN_RSS_HASH_INPUT, 2587 rss->hash_ctrl_dma_addr, 2588 sizeof(*rss->hash_ctrl), 0); 2589 if (unlikely(rc)) 2590 return rc; 2591 2592 if (fields) 2593 *fields = rss->hash_ctrl->selected_fields[proto].fields; 2594 2595 return 0; 2596 } 2597 2598 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) 2599 { 2600 struct ena_com_admin_queue *admin_queue = 
&ena_dev->admin_queue; 2601 struct ena_rss *rss = &ena_dev->rss; 2602 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; 2603 struct ena_admin_set_feat_cmd cmd; 2604 struct ena_admin_set_feat_resp resp; 2605 int ret; 2606 2607 if (!ena_com_check_supported_feature_id(ena_dev, 2608 ENA_ADMIN_RSS_HASH_INPUT)) { 2609 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", 2610 ENA_ADMIN_RSS_HASH_INPUT); 2611 return ENA_COM_UNSUPPORTED; 2612 } 2613 2614 memset(&cmd, 0x0, sizeof(cmd)); 2615 2616 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2617 cmd.aq_common_descriptor.flags = 2618 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2619 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT; 2620 cmd.u.flow_hash_input.enabled_input_sort = 2621 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK | 2622 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; 2623 2624 ret = ena_com_mem_addr_set(ena_dev, 2625 &cmd.control_buffer.address, 2626 rss->hash_ctrl_dma_addr); 2627 if (unlikely(ret)) { 2628 ena_trc_err(ena_dev, "Memory address set failed\n"); 2629 return ret; 2630 } 2631 cmd.control_buffer.length = sizeof(*hash_ctrl); 2632 2633 ret = ena_com_execute_admin_command(admin_queue, 2634 (struct ena_admin_aq_entry *)&cmd, 2635 sizeof(cmd), 2636 (struct ena_admin_acq_entry *)&resp, 2637 sizeof(resp)); 2638 if (unlikely(ret)) 2639 ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret); 2640 2641 return ret; 2642 } 2643 2644 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) 2645 { 2646 struct ena_rss *rss = &ena_dev->rss; 2647 struct ena_admin_feature_rss_hash_control *hash_ctrl = 2648 rss->hash_ctrl; 2649 u16 available_fields = 0; 2650 int rc, i; 2651 2652 /* Get the supported hash input */ 2653 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2654 if (unlikely(rc)) 2655 return rc; 2656 2657 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = 2658 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2659 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2660 2661 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = 2662 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2663 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2664 2665 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = 2666 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2667 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2668 2669 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = 2670 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2671 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2672 2673 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = 2674 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; 2675 2676 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = 2677 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; 2678 2679 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = 2680 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; 2681 2682 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = 2683 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; 2684 2685 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { 2686 available_fields = hash_ctrl->selected_fields[i].fields & 2687 hash_ctrl->supported_fields[i].fields; 2688 if (available_fields != hash_ctrl->selected_fields[i].fields) { 2689 ena_trc_err(ena_dev, "Hash control doesn't support all the desire configuration. 
proto %x supported %x selected %x\n", 2690 i, hash_ctrl->supported_fields[i].fields, 2691 hash_ctrl->selected_fields[i].fields); 2692 return ENA_COM_UNSUPPORTED; 2693 } 2694 } 2695 2696 rc = ena_com_set_hash_ctrl(ena_dev); 2697 2698 /* In case of failure, restore the old hash ctrl */ 2699 if (unlikely(rc)) 2700 ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2701 2702 return rc; 2703 } 2704 2705 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, 2706 enum ena_admin_flow_hash_proto proto, 2707 u16 hash_fields) 2708 { 2709 struct ena_rss *rss = &ena_dev->rss; 2710 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; 2711 u16 supported_fields; 2712 int rc; 2713 2714 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { 2715 ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto); 2716 return ENA_COM_INVAL; 2717 } 2718 2719 /* Get the ctrl table */ 2720 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); 2721 if (unlikely(rc)) 2722 return rc; 2723 2724 /* Make sure all the fields are supported */ 2725 supported_fields = hash_ctrl->supported_fields[proto].fields; 2726 if ((hash_fields & supported_fields) != hash_fields) { 2727 ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n", 2728 proto, hash_fields, supported_fields); 2729 } 2730 2731 hash_ctrl->selected_fields[proto].fields = hash_fields; 2732 2733 rc = ena_com_set_hash_ctrl(ena_dev); 2734 2735 /* In case of failure, restore the old hash ctrl */ 2736 if (unlikely(rc)) 2737 ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2738 2739 return rc; 2740 } 2741 2742 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, 2743 u16 entry_idx, u16 entry_value) 2744 { 2745 struct ena_rss *rss = &ena_dev->rss; 2746 2747 if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) 2748 return ENA_COM_INVAL; 2749 2750 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES))) 2751 return ENA_COM_INVAL; 2752 2753 rss->host_rss_ind_tbl[entry_idx] = entry_value; 2754 2755 return 0; 2756 } 2757 2758 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) 2759 { 2760 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2761 struct ena_rss *rss = &ena_dev->rss; 2762 struct ena_admin_set_feat_cmd cmd; 2763 struct ena_admin_set_feat_resp resp; 2764 int ret; 2765 2766 if (!ena_com_check_supported_feature_id(ena_dev, 2767 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) { 2768 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", 2769 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); 2770 return ENA_COM_UNSUPPORTED; 2771 } 2772 2773 ret = ena_com_ind_tbl_convert_to_device(ena_dev); 2774 if (ret) { 2775 ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n"); 2776 return ret; 2777 } 2778 2779 memset(&cmd, 0x0, sizeof(cmd)); 2780 2781 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2782 cmd.aq_common_descriptor.flags = 2783 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2784 cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG; 2785 cmd.u.ind_table.size = rss->tbl_log_size; 2786 cmd.u.ind_table.inline_index = 0xFFFFFFFF; 2787 2788 ret = ena_com_mem_addr_set(ena_dev, 2789 &cmd.control_buffer.address, 2790 rss->rss_ind_tbl_dma_addr); 2791 if (unlikely(ret)) { 2792 ena_trc_err(ena_dev, "Memory address set failed\n"); 2793 return ret; 2794 } 2795 2796 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * 2797 sizeof(struct ena_admin_rss_ind_table_entry); 2798 2799 ret = ena_com_execute_admin_command(admin_queue, 2800 (struct ena_admin_aq_entry *)&cmd, 2801 sizeof(cmd), 2802 (struct
ena_admin_acq_entry *)&resp, 2803 sizeof(resp)); 2804 2805 if (unlikely(ret)) 2806 ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret); 2807 2808 return ret; 2809 } 2810 2811 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) 2812 { 2813 struct ena_rss *rss = &ena_dev->rss; 2814 struct ena_admin_get_feat_resp get_resp; 2815 u32 tbl_size; 2816 int i, rc; 2817 2818 tbl_size = (1ULL << rss->tbl_log_size) * 2819 sizeof(struct ena_admin_rss_ind_table_entry); 2820 2821 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2822 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 2823 rss->rss_ind_tbl_dma_addr, 2824 tbl_size, 0); 2825 if (unlikely(rc)) 2826 return rc; 2827 2828 if (!ind_tbl) 2829 return 0; 2830 2831 for (i = 0; i < (1 << rss->tbl_log_size); i++) 2832 ind_tbl[i] = rss->host_rss_ind_tbl[i]; 2833 2834 return 0; 2835 } 2836 2837 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) 2838 { 2839 int rc; 2840 2841 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2842 2843 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); 2844 if (unlikely(rc)) 2845 goto err_indr_tbl; 2846 2847 /* The following function might return unsupported in case the 2848 * device doesn't support setting the key / hash function. We can safely 2849 * ignore this error and have indirection table support only. 2850 */ 2851 rc = ena_com_hash_key_allocate(ena_dev); 2852 if (likely(!rc)) 2853 ena_com_hash_key_fill_default_key(ena_dev); 2854 else if (rc != ENA_COM_UNSUPPORTED) 2855 goto err_hash_key; 2856 2857 rc = ena_com_hash_ctrl_init(ena_dev); 2858 if (unlikely(rc)) 2859 goto err_hash_ctrl; 2860 2861 return 0; 2862 2863 err_hash_ctrl: 2864 ena_com_hash_key_destroy(ena_dev); 2865 err_hash_key: 2866 ena_com_indirect_table_destroy(ena_dev); 2867 err_indr_tbl: 2868 2869 return rc; 2870 } 2871 2872 void ena_com_rss_destroy(struct ena_com_dev *ena_dev) 2873 { 2874 ena_com_indirect_table_destroy(ena_dev); 2875 ena_com_hash_key_destroy(ena_dev); 2876 ena_com_hash_ctrl_destroy(ena_dev); 2877 2878 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2879 } 2880 2881 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) 2882 { 2883 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2884 2885 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 2886 SZ_4K, 2887 host_attr->host_info, 2888 host_attr->host_info_dma_addr, 2889 host_attr->host_info_dma_handle); 2890 if (unlikely(!host_attr->host_info)) 2891 return ENA_COM_NO_MEM; 2892 2893 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << 2894 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | 2895 (ENA_COMMON_SPEC_VERSION_MINOR)); 2896 2897 return 0; 2898 } 2899 2900 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, 2901 u32 debug_area_size) 2902 { 2903 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2904 2905 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, 2906 debug_area_size, 2907 host_attr->debug_area_virt_addr, 2908 host_attr->debug_area_dma_addr, 2909 host_attr->debug_area_dma_handle); 2910 if (unlikely(!host_attr->debug_area_virt_addr)) { 2911 host_attr->debug_area_size = 0; 2912 return ENA_COM_NO_MEM; 2913 } 2914 2915 host_attr->debug_area_size = debug_area_size; 2916 2917 return 0; 2918 } 2919 2920 void ena_com_delete_host_info(struct ena_com_dev *ena_dev) 2921 { 2922 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2923 2924 if (host_attr->host_info) { 2925 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 2926 SZ_4K, 2927 host_attr->host_info, 2928 host_attr->host_info_dma_addr, 2929 
host_attr->host_info_dma_handle); 2930 host_attr->host_info = NULL; 2931 } 2932 } 2933 2934 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) 2935 { 2936 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2937 2938 if (host_attr->debug_area_virt_addr) { 2939 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, 2940 host_attr->debug_area_size, 2941 host_attr->debug_area_virt_addr, 2942 host_attr->debug_area_dma_addr, 2943 host_attr->debug_area_dma_handle); 2944 host_attr->debug_area_virt_addr = NULL; 2945 } 2946 } 2947 2948 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) 2949 { 2950 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2951 struct ena_com_admin_queue *admin_queue; 2952 struct ena_admin_set_feat_cmd cmd; 2953 struct ena_admin_set_feat_resp resp; 2954 2955 int ret; 2956 2957 /* Host attribute config is called before ena_com_get_dev_attr_feat 2958 * so ena_com can't check if the feature is supported. 2959 */ 2960 2961 memset(&cmd, 0x0, sizeof(cmd)); 2962 admin_queue = &ena_dev->admin_queue; 2963 2964 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2965 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; 2966 2967 ret = ena_com_mem_addr_set(ena_dev, 2968 &cmd.u.host_attr.debug_ba, 2969 host_attr->debug_area_dma_addr); 2970 if (unlikely(ret)) { 2971 ena_trc_err(ena_dev, "Memory address set failed\n"); 2972 return ret; 2973 } 2974 2975 ret = ena_com_mem_addr_set(ena_dev, 2976 &cmd.u.host_attr.os_info_ba, 2977 host_attr->host_info_dma_addr); 2978 if (unlikely(ret)) { 2979 ena_trc_err(ena_dev, "Memory address set failed\n"); 2980 return ret; 2981 } 2982 2983 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; 2984 2985 ret = ena_com_execute_admin_command(admin_queue, 2986 (struct ena_admin_aq_entry *)&cmd, 2987 sizeof(cmd), 2988 (struct ena_admin_acq_entry *)&resp, 2989 sizeof(resp)); 2990 2991 if (unlikely(ret)) 2992 ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret); 2993 2994 return ret; 2995 } 2996 2997 /* Interrupt moderation */ 2998 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) 2999 { 3000 return ena_com_check_supported_feature_id(ena_dev, 3001 ENA_ADMIN_INTERRUPT_MODERATION); 3002 } 3003 3004 static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev, 3005 u32 coalesce_usecs, 3006 u32 intr_delay_resolution, 3007 u32 *intr_moder_interval) 3008 { 3009 if (!intr_delay_resolution) { 3010 ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n"); 3011 return ENA_COM_FAULT; 3012 } 3013 3014 *intr_moder_interval = coalesce_usecs / intr_delay_resolution; 3015 3016 return 0; 3017 } 3018 3019 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, 3020 u32 tx_coalesce_usecs) 3021 { 3022 return ena_com_update_nonadaptive_moderation_interval(ena_dev, 3023 tx_coalesce_usecs, 3024 ena_dev->intr_delay_resolution, 3025 &ena_dev->intr_moder_tx_interval); 3026 } 3027 3028 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, 3029 u32 rx_coalesce_usecs) 3030 { 3031 return ena_com_update_nonadaptive_moderation_interval(ena_dev, 3032 rx_coalesce_usecs, 3033 ena_dev->intr_delay_resolution, 3034 &ena_dev->intr_moder_rx_interval); 3035 } 3036 3037 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) 3038 { 3039 struct ena_admin_get_feat_resp get_resp; 3040 u16 delay_resolution; 3041 int rc; 3042 3043 rc = ena_com_get_feature(ena_dev, &get_resp, 3044 ENA_ADMIN_INTERRUPT_MODERATION, 0); 3045 3046 if (rc) { 
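/* Distinguish a device that simply doesn't expose the feature (not an error) from a genuine admin command failure. */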
3047 if (rc == ENA_COM_UNSUPPORTED) { 3048 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", 3049 ENA_ADMIN_INTERRUPT_MODERATION); 3050 rc = 0; 3051 } else { 3052 ena_trc_err(ena_dev, 3053 "Failed to get interrupt moderation admin cmd. rc: %d\n", rc); 3054 } 3055 3056 /* No moderation supported (or the query failed), disable adaptive support */ 3057 ena_com_disable_adaptive_moderation(ena_dev); 3058 return rc; 3059 } 3060 3061 /* Moderation is supported by the device; record the interrupt delay resolution it reports */ 3062 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; 3063 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); 3064 3065 /* Disable adaptive moderation by default - can be enabled later */ 3066 ena_com_disable_adaptive_moderation(ena_dev); 3067 3068 return 0; 3069 } 3070 3071 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) 3072 { 3073 return ena_dev->intr_moder_tx_interval; 3074 } 3075 3076 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) 3077 { 3078 return ena_dev->intr_moder_rx_interval; 3079 } 3080 3081 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, 3082 struct ena_admin_feature_llq_desc *llq_features, 3083 struct ena_llq_configurations *llq_default_cfg) 3084 { 3085 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 3086 int rc; 3087 3088 if (!llq_features->max_llq_num) { 3089 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 3090 return 0; 3091 } 3092 3093 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); 3094 if (rc) 3095 return rc; 3096 3097 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size - 3098 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); 3099 3100 if (unlikely(ena_dev->tx_max_header_size == 0)) { 3101 ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n"); 3102 return ENA_COM_INVAL; 3103 } 3104 3105 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; 3106 3107 return 0; 3108 } 3109
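/*
 * Illustrative bring-up sketch (editorial addition, not part of the ena_com
 * API itself): one plausible order in which a driver attach path could call
 * the helpers defined in this file. The function example_attach(), its
 * error-handling layout, num_io_queues and the RSS table log size below are
 * hypothetical placeholders; the real ENA drivers remain the authoritative
 * reference for the exact sequence.
 *
 *	static int example_attach(struct ena_com_dev *ena_dev,
 *				  struct ena_aenq_handlers *handlers)
 *	{
 *		struct ena_com_dev_get_features_ctx feat;
 *		int rc;
 *
 *		rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *		if (rc)
 *			return rc;
 *
 *		rc = ena_com_validate_version(ena_dev);
 *		if (rc)
 *			goto err_mmio;
 *
 *		rc = ena_com_get_dma_width(ena_dev);
 *		if (rc < 0)
 *			goto err_mmio;
 *
 *		rc = ena_com_admin_init(ena_dev, handlers);
 *		if (rc)
 *			goto err_mmio;
 *
 * Until interrupt vectors are attached, the admin queue can be driven in
 * polling mode:
 *
 *		ena_com_set_admin_polling_mode(ena_dev, true);
 *
 *		rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
 *		if (rc)
 *			goto err_admin;
 *
 *		return 0;
 *
 *	err_admin:
 *		ena_com_admin_destroy(ena_dev);
 *	err_mmio:
 *		ena_com_mmio_reg_read_request_destroy(ena_dev);
 *		return rc;
 *	}
 *
 * RSS follows the same pattern: ena_com_rss_init() allocates the host-side
 * tables, ena_com_indirect_table_fill_entry() populates them, and
 * ena_com_indirect_table_set() pushes the result to the device. A fragment
 * with error handling omitted for brevity, assuming num_io_queues is the
 * number of IO queues the driver created with ena_com_create_io_queue():
 *
 *	rc = ena_com_rss_init(ena_dev, 7);	(log size 7 -> 128 entries)
 *	for (i = 0; i < 128; i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i,
 *						  i % num_io_queues);
 *	rc = ena_com_indirect_table_set(ena_dev);
 */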