1 /* 2 * Copyright 2015 Amazon.com, Inc. or its affiliates. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include "ena_com.h" 34 35 /*****************************************************************************/ 36 /*****************************************************************************/ 37 38 /* Timeout in micro-sec */ 39 #define ADMIN_CMD_TIMEOUT_US (3000000) 40 41 #define ENA_ASYNC_QUEUE_DEPTH 16 42 #define ENA_ADMIN_QUEUE_DEPTH 32 43 44 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ 45 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ 46 | (ENA_COMMON_SPEC_VERSION_MINOR)) 47 48 #define ENA_CTRL_MAJOR 0 49 #define ENA_CTRL_MINOR 0 50 #define ENA_CTRL_SUB_MINOR 1 51 52 #define MIN_ENA_CTRL_VER \ 53 (((ENA_CTRL_MAJOR) << \ 54 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ 55 ((ENA_CTRL_MINOR) << \ 56 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ 57 (ENA_CTRL_SUB_MINOR)) 58 59 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) 60 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) 61 62 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF 63 64 /*****************************************************************************/ 65 /*****************************************************************************/ 66 /*****************************************************************************/ 67 68 enum ena_cmd_status { 69 ENA_CMD_SUBMITTED, 70 ENA_CMD_COMPLETED, 71 /* Abort - canceled by the driver */ 72 ENA_CMD_ABORTED, 73 }; 74 75 struct ena_comp_ctx { 76 struct completion wait_event; 77 struct ena_admin_acq_entry *user_cqe; 78 u32 comp_size; 79 enum ena_cmd_status status; 80 /* status from the device */ 81 u8 comp_status; 82 u8 cmd_opcode; 83 bool occupied; 84 }; 85 86 struct ena_com_stats_ctx { 87 struct ena_admin_aq_get_stats_cmd get_cmd; 88 struct ena_admin_acq_get_stats_resp get_resp; 89 }; 90 91 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, 92 struct ena_common_mem_addr *ena_addr, 93 dma_addr_t addr) 94 { 95 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { 96 pr_err("dma address has more bits that the device supports\n"); 97 return -EINVAL; 98 } 99 100 ena_addr->mem_addr_low = (u32)addr; 101 ena_addr->mem_addr_high = (u64)addr >> 32; 
102 103 return 0; 104 } 105 106 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue) 107 { 108 struct ena_com_admin_sq *sq = &queue->sq; 109 u16 size = ADMIN_SQ_SIZE(queue->q_depth); 110 111 sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, 112 GFP_KERNEL); 113 114 if (!sq->entries) { 115 pr_err("memory allocation failed"); 116 return -ENOMEM; 117 } 118 119 sq->head = 0; 120 sq->tail = 0; 121 sq->phase = 1; 122 123 sq->db_addr = NULL; 124 125 return 0; 126 } 127 128 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) 129 { 130 struct ena_com_admin_cq *cq = &queue->cq; 131 u16 size = ADMIN_CQ_SIZE(queue->q_depth); 132 133 cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, 134 GFP_KERNEL); 135 136 if (!cq->entries) { 137 pr_err("memory allocation failed"); 138 return -ENOMEM; 139 } 140 141 cq->head = 0; 142 cq->phase = 1; 143 144 return 0; 145 } 146 147 static int ena_com_admin_init_aenq(struct ena_com_dev *dev, 148 struct ena_aenq_handlers *aenq_handlers) 149 { 150 struct ena_com_aenq *aenq = &dev->aenq; 151 u32 addr_low, addr_high, aenq_caps; 152 u16 size; 153 154 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 155 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 156 aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, 157 GFP_KERNEL); 158 159 if (!aenq->entries) { 160 pr_err("memory allocation failed"); 161 return -ENOMEM; 162 } 163 164 aenq->head = aenq->q_depth; 165 aenq->phase = 1; 166 167 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); 168 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); 169 170 writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); 171 writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); 172 173 aenq_caps = 0; 174 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; 175 aenq_caps |= (sizeof(struct ena_admin_aenq_entry) 176 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & 177 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; 178 writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); 179 180 if (unlikely(!aenq_handlers)) { 181 pr_err("aenq handlers pointer is NULL\n"); 182 return -EINVAL; 183 } 184 185 aenq->aenq_handlers = aenq_handlers; 186 187 return 0; 188 } 189 190 static inline void comp_ctxt_release(struct ena_com_admin_queue *queue, 191 struct ena_comp_ctx *comp_ctx) 192 { 193 comp_ctx->occupied = false; 194 atomic_dec(&queue->outstanding_cmds); 195 } 196 197 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, 198 u16 command_id, bool capture) 199 { 200 if (unlikely(command_id >= queue->q_depth)) { 201 pr_err("command id is larger than the queue size. 
cmd_id: %u queue size %d\n", 202 command_id, queue->q_depth); 203 return NULL; 204 } 205 206 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { 207 pr_err("Completion context is occupied\n"); 208 return NULL; 209 } 210 211 if (capture) { 212 atomic_inc(&queue->outstanding_cmds); 213 queue->comp_ctx[command_id].occupied = true; 214 } 215 216 return &queue->comp_ctx[command_id]; 217 } 218 219 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 220 struct ena_admin_aq_entry *cmd, 221 size_t cmd_size_in_bytes, 222 struct ena_admin_acq_entry *comp, 223 size_t comp_size_in_bytes) 224 { 225 struct ena_comp_ctx *comp_ctx; 226 u16 tail_masked, cmd_id; 227 u16 queue_size_mask; 228 u16 cnt; 229 230 queue_size_mask = admin_queue->q_depth - 1; 231 232 tail_masked = admin_queue->sq.tail & queue_size_mask; 233 234 /* In case of queue FULL */ 235 cnt = admin_queue->sq.tail - admin_queue->sq.head; 236 if (cnt >= admin_queue->q_depth) { 237 pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", 238 admin_queue->sq.tail, admin_queue->sq.head, 239 admin_queue->q_depth); 240 admin_queue->stats.out_of_space++; 241 return ERR_PTR(-ENOSPC); 242 } 243 244 cmd_id = admin_queue->curr_cmd_id; 245 246 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & 247 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; 248 249 cmd->aq_common_descriptor.command_id |= cmd_id & 250 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; 251 252 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); 253 if (unlikely(!comp_ctx)) 254 return ERR_PTR(-EINVAL); 255 256 comp_ctx->status = ENA_CMD_SUBMITTED; 257 comp_ctx->comp_size = (u32)comp_size_in_bytes; 258 comp_ctx->user_cqe = comp; 259 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; 260 261 reinit_completion(&comp_ctx->wait_event); 262 263 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); 264 265 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & 266 queue_size_mask; 267 268 admin_queue->sq.tail++; 269 admin_queue->stats.submitted_cmd++; 270 271 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) 272 admin_queue->sq.phase = !admin_queue->sq.phase; 273 274 writel(admin_queue->sq.tail, admin_queue->sq.db_addr); 275 276 return comp_ctx; 277 } 278 279 static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) 280 { 281 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); 282 struct ena_comp_ctx *comp_ctx; 283 u16 i; 284 285 queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL); 286 if (unlikely(!queue->comp_ctx)) { 287 pr_err("memory allocation failed"); 288 return -ENOMEM; 289 } 290 291 for (i = 0; i < queue->q_depth; i++) { 292 comp_ctx = get_comp_ctxt(queue, i, false); 293 if (comp_ctx) 294 init_completion(&comp_ctx->wait_event); 295 } 296 297 return 0; 298 } 299 300 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 301 struct ena_admin_aq_entry *cmd, 302 size_t cmd_size_in_bytes, 303 struct ena_admin_acq_entry *comp, 304 size_t comp_size_in_bytes) 305 { 306 unsigned long flags; 307 struct ena_comp_ctx *comp_ctx; 308 309 spin_lock_irqsave(&admin_queue->q_lock, flags); 310 if (unlikely(!admin_queue->running_state)) { 311 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 312 return ERR_PTR(-ENODEV); 313 } 314 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, 315 cmd_size_in_bytes, 316 comp, 317 comp_size_in_bytes); 318 if (unlikely(IS_ERR(comp_ctx))) 319 admin_queue->running_state = false; 320 
spin_unlock_irqrestore(&admin_queue->q_lock, flags); 321 322 return comp_ctx; 323 } 324 325 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, 326 struct ena_com_create_io_ctx *ctx, 327 struct ena_com_io_sq *io_sq) 328 { 329 size_t size; 330 int dev_node = 0; 331 332 memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); 333 334 io_sq->desc_entry_size = 335 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 336 sizeof(struct ena_eth_io_tx_desc) : 337 sizeof(struct ena_eth_io_rx_desc); 338 339 size = io_sq->desc_entry_size * io_sq->q_depth; 340 341 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 342 dev_node = dev_to_node(ena_dev->dmadev); 343 set_dev_node(ena_dev->dmadev, ctx->numa_node); 344 io_sq->desc_addr.virt_addr = 345 dma_zalloc_coherent(ena_dev->dmadev, size, 346 &io_sq->desc_addr.phys_addr, 347 GFP_KERNEL); 348 set_dev_node(ena_dev->dmadev, dev_node); 349 if (!io_sq->desc_addr.virt_addr) { 350 io_sq->desc_addr.virt_addr = 351 dma_zalloc_coherent(ena_dev->dmadev, size, 352 &io_sq->desc_addr.phys_addr, 353 GFP_KERNEL); 354 } 355 } else { 356 dev_node = dev_to_node(ena_dev->dmadev); 357 set_dev_node(ena_dev->dmadev, ctx->numa_node); 358 io_sq->desc_addr.virt_addr = 359 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 360 set_dev_node(ena_dev->dmadev, dev_node); 361 if (!io_sq->desc_addr.virt_addr) { 362 io_sq->desc_addr.virt_addr = 363 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 364 } 365 } 366 367 if (!io_sq->desc_addr.virt_addr) { 368 pr_err("memory allocation failed"); 369 return -ENOMEM; 370 } 371 372 io_sq->tail = 0; 373 io_sq->next_to_comp = 0; 374 io_sq->phase = 1; 375 376 return 0; 377 } 378 379 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, 380 struct ena_com_create_io_ctx *ctx, 381 struct ena_com_io_cq *io_cq) 382 { 383 size_t size; 384 int prev_node = 0; 385 386 memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); 387 388 /* Use the basic completion descriptor for Rx */ 389 io_cq->cdesc_entry_size_in_bytes = 390 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 391 sizeof(struct ena_eth_io_tx_cdesc) : 392 sizeof(struct ena_eth_io_rx_cdesc_base); 393 394 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; 395 396 prev_node = dev_to_node(ena_dev->dmadev); 397 set_dev_node(ena_dev->dmadev, ctx->numa_node); 398 io_cq->cdesc_addr.virt_addr = 399 dma_zalloc_coherent(ena_dev->dmadev, size, 400 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 401 set_dev_node(ena_dev->dmadev, prev_node); 402 if (!io_cq->cdesc_addr.virt_addr) { 403 io_cq->cdesc_addr.virt_addr = 404 dma_zalloc_coherent(ena_dev->dmadev, size, 405 &io_cq->cdesc_addr.phys_addr, 406 GFP_KERNEL); 407 } 408 409 if (!io_cq->cdesc_addr.virt_addr) { 410 pr_err("memory allocation failed"); 411 return -ENOMEM; 412 } 413 414 io_cq->phase = 1; 415 io_cq->head = 0; 416 417 return 0; 418 } 419 420 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, 421 struct ena_admin_acq_entry *cqe) 422 { 423 struct ena_comp_ctx *comp_ctx; 424 u16 cmd_id; 425 426 cmd_id = cqe->acq_common_descriptor.command & 427 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; 428 429 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); 430 if (unlikely(!comp_ctx)) { 431 pr_err("comp_ctx is NULL. 
Changing the admin queue running state\n"); 432 admin_queue->running_state = false; 433 return; 434 } 435 436 comp_ctx->status = ENA_CMD_COMPLETED; 437 comp_ctx->comp_status = cqe->acq_common_descriptor.status; 438 439 if (comp_ctx->user_cqe) 440 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); 441 442 if (!admin_queue->polling) 443 complete(&comp_ctx->wait_event); 444 } 445 446 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) 447 { 448 struct ena_admin_acq_entry *cqe = NULL; 449 u16 comp_num = 0; 450 u16 head_masked; 451 u8 phase; 452 453 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); 454 phase = admin_queue->cq.phase; 455 456 cqe = &admin_queue->cq.entries[head_masked]; 457 458 /* Go over all the completions */ 459 while ((cqe->acq_common_descriptor.flags & 460 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 461 /* Do not read the rest of the completion entry before the 462 * phase bit was validated 463 */ 464 rmb(); 465 ena_com_handle_single_admin_completion(admin_queue, cqe); 466 467 head_masked++; 468 comp_num++; 469 if (unlikely(head_masked == admin_queue->q_depth)) { 470 head_masked = 0; 471 phase = !phase; 472 } 473 474 cqe = &admin_queue->cq.entries[head_masked]; 475 } 476 477 admin_queue->cq.head += comp_num; 478 admin_queue->cq.phase = phase; 479 admin_queue->sq.head += comp_num; 480 admin_queue->stats.completed_cmd += comp_num; 481 } 482 483 static int ena_com_comp_status_to_errno(u8 comp_status) 484 { 485 if (unlikely(comp_status != 0)) 486 pr_err("admin command failed[%u]\n", comp_status); 487 488 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR)) 489 return -EINVAL; 490 491 switch (comp_status) { 492 case ENA_ADMIN_SUCCESS: 493 return 0; 494 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: 495 return -ENOMEM; 496 case ENA_ADMIN_UNSUPPORTED_OPCODE: 497 return -EPERM; 498 case ENA_ADMIN_BAD_OPCODE: 499 case ENA_ADMIN_MALFORMED_REQUEST: 500 case ENA_ADMIN_ILLEGAL_PARAMETER: 501 case ENA_ADMIN_UNKNOWN_ERROR: 502 return -EINVAL; 503 } 504 505 return 0; 506 } 507 508 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, 509 struct ena_com_admin_queue *admin_queue) 510 { 511 unsigned long flags; 512 u32 start_time; 513 int ret; 514 515 start_time = ((u32)jiffies_to_usecs(jiffies)); 516 517 while (comp_ctx->status == ENA_CMD_SUBMITTED) { 518 if ((((u32)jiffies_to_usecs(jiffies)) - start_time) > 519 ADMIN_CMD_TIMEOUT_US) { 520 pr_err("Wait for completion (polling) timeout\n"); 521 /* ENA didn't have any completion */ 522 spin_lock_irqsave(&admin_queue->q_lock, flags); 523 admin_queue->stats.no_completion++; 524 admin_queue->running_state = false; 525 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 526 527 ret = -ETIME; 528 goto err; 529 } 530 531 spin_lock_irqsave(&admin_queue->q_lock, flags); 532 ena_com_handle_admin_completion(admin_queue); 533 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 534 535 msleep(100); 536 } 537 538 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { 539 pr_err("Command was aborted\n"); 540 spin_lock_irqsave(&admin_queue->q_lock, flags); 541 admin_queue->stats.aborted_cmd++; 542 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 543 ret = -ENODEV; 544 goto err; 545 } 546 547 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", 548 comp_ctx->status); 549 550 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); 551 err: 552 comp_ctxt_release(admin_queue, comp_ctx); 553 return ret; 554 } 555 556 static int 
ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, 557 struct ena_com_admin_queue *admin_queue) 558 { 559 unsigned long flags; 560 int ret; 561 562 wait_for_completion_timeout(&comp_ctx->wait_event, 563 usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US)); 564 565 /* In case the command wasn't completed find out the root cause. 566 * There might be 2 kinds of errors 567 * 1) No completion (timeout reached) 568 * 2) There is completion but the device didn't get any msi-x interrupt. 569 */ 570 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { 571 spin_lock_irqsave(&admin_queue->q_lock, flags); 572 ena_com_handle_admin_completion(admin_queue); 573 admin_queue->stats.no_completion++; 574 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 575 576 if (comp_ctx->status == ENA_CMD_COMPLETED) 577 pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n", 578 comp_ctx->cmd_opcode); 579 else 580 pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n", 581 comp_ctx->cmd_opcode, comp_ctx->status); 582 583 admin_queue->running_state = false; 584 ret = -ETIME; 585 goto err; 586 } 587 588 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); 589 err: 590 comp_ctxt_release(admin_queue, comp_ctx); 591 return ret; 592 } 593 594 /* This method read the hardware device register through posting writes 595 * and waiting for response 596 * On timeout the function will return ENA_MMIO_READ_TIMEOUT 597 */ 598 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) 599 { 600 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 601 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = 602 mmio_read->read_resp; 603 u32 mmio_read_reg, ret; 604 unsigned long flags; 605 int i; 606 607 might_sleep(); 608 609 /* If readless is disabled, perform regular read */ 610 if (!mmio_read->readless_supported) 611 return readl(ena_dev->reg_bar + offset); 612 613 spin_lock_irqsave(&mmio_read->lock, flags); 614 mmio_read->seq_num++; 615 616 read_resp->req_id = mmio_read->seq_num + 0xDEAD; 617 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) & 618 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK; 619 mmio_read_reg |= mmio_read->seq_num & 620 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; 621 622 /* make sure read_resp->req_id get updated before the hw can write 623 * there 624 */ 625 wmb(); 626 627 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); 628 629 for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) { 630 if (read_resp->req_id == mmio_read->seq_num) 631 break; 632 633 udelay(1); 634 } 635 636 if (unlikely(i == ENA_REG_READ_TIMEOUT)) { 637 pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", 638 mmio_read->seq_num, offset, read_resp->req_id, 639 read_resp->reg_off); 640 ret = ENA_MMIO_READ_TIMEOUT; 641 goto err; 642 } 643 644 if (read_resp->reg_off != offset) { 645 pr_err("Read failure: wrong offset provided"); 646 ret = ENA_MMIO_READ_TIMEOUT; 647 } else { 648 ret = read_resp->reg_val; 649 } 650 err: 651 spin_unlock_irqrestore(&mmio_read->lock, flags); 652 653 return ret; 654 } 655 656 /* There are two types to wait for completion. 657 * Polling mode - wait until the completion is available. 658 * Async mode - wait on wait queue until the completion is ready 659 * (or the timeout expired). 660 * It is expected that the IRQ called ena_com_handle_admin_completion 661 * to mark the completions. 
662 */ 663 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, 664 struct ena_com_admin_queue *admin_queue) 665 { 666 if (admin_queue->polling) 667 return ena_com_wait_and_process_admin_cq_polling(comp_ctx, 668 admin_queue); 669 670 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx, 671 admin_queue); 672 } 673 674 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, 675 struct ena_com_io_sq *io_sq) 676 { 677 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 678 struct ena_admin_aq_destroy_sq_cmd destroy_cmd; 679 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp; 680 u8 direction; 681 int ret; 682 683 memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); 684 685 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 686 direction = ENA_ADMIN_SQ_DIRECTION_TX; 687 else 688 direction = ENA_ADMIN_SQ_DIRECTION_RX; 689 690 destroy_cmd.sq.sq_identity |= (direction << 691 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & 692 ENA_ADMIN_SQ_SQ_DIRECTION_MASK; 693 694 destroy_cmd.sq.sq_idx = io_sq->idx; 695 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ; 696 697 ret = ena_com_execute_admin_command(admin_queue, 698 (struct ena_admin_aq_entry *)&destroy_cmd, 699 sizeof(destroy_cmd), 700 (struct ena_admin_acq_entry *)&destroy_resp, 701 sizeof(destroy_resp)); 702 703 if (unlikely(ret && (ret != -ENODEV))) 704 pr_err("failed to destroy io sq error: %d\n", ret); 705 706 return ret; 707 } 708 709 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, 710 struct ena_com_io_sq *io_sq, 711 struct ena_com_io_cq *io_cq) 712 { 713 size_t size; 714 715 if (io_cq->cdesc_addr.virt_addr) { 716 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; 717 718 dma_free_coherent(ena_dev->dmadev, size, 719 io_cq->cdesc_addr.virt_addr, 720 io_cq->cdesc_addr.phys_addr); 721 722 io_cq->cdesc_addr.virt_addr = NULL; 723 } 724 725 if (io_sq->desc_addr.virt_addr) { 726 size = io_sq->desc_entry_size * io_sq->q_depth; 727 728 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) 729 dma_free_coherent(ena_dev->dmadev, size, 730 io_sq->desc_addr.virt_addr, 731 io_sq->desc_addr.phys_addr); 732 else 733 devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr); 734 735 io_sq->desc_addr.virt_addr = NULL; 736 } 737 } 738 739 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, 740 u16 exp_state) 741 { 742 u32 val, i; 743 744 for (i = 0; i < timeout; i++) { 745 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 746 747 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { 748 pr_err("Reg read timeout occurred\n"); 749 return -ETIME; 750 } 751 752 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == 753 exp_state) 754 return 0; 755 756 /* The resolution of the timeout is 100ms */ 757 msleep(100); 758 } 759 760 return -ETIME; 761 } 762 763 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, 764 enum ena_admin_aq_feature_id feature_id) 765 { 766 u32 feature_mask = 1 << feature_id; 767 768 /* Device attributes is always supported */ 769 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && 770 !(ena_dev->supported_features & feature_mask)) 771 return false; 772 773 return true; 774 } 775 776 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, 777 struct ena_admin_get_feat_resp *get_resp, 778 enum ena_admin_aq_feature_id feature_id, 779 dma_addr_t control_buf_dma_addr, 780 u32 control_buff_size) 781 { 782 struct ena_com_admin_queue *admin_queue; 783 struct ena_admin_get_feat_cmd get_cmd; 784 
int ret; 785 786 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { 787 pr_debug("Feature %d isn't supported\n", feature_id); 788 return -EPERM; 789 } 790 791 memset(&get_cmd, 0x0, sizeof(get_cmd)); 792 admin_queue = &ena_dev->admin_queue; 793 794 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; 795 796 if (control_buff_size) 797 get_cmd.aq_common_descriptor.flags = 798 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 799 else 800 get_cmd.aq_common_descriptor.flags = 0; 801 802 ret = ena_com_mem_addr_set(ena_dev, 803 &get_cmd.control_buffer.address, 804 control_buf_dma_addr); 805 if (unlikely(ret)) { 806 pr_err("memory address set failed\n"); 807 return ret; 808 } 809 810 get_cmd.control_buffer.length = control_buff_size; 811 812 get_cmd.feat_common.feature_id = feature_id; 813 814 ret = ena_com_execute_admin_command(admin_queue, 815 (struct ena_admin_aq_entry *) 816 &get_cmd, 817 sizeof(get_cmd), 818 (struct ena_admin_acq_entry *) 819 get_resp, 820 sizeof(*get_resp)); 821 822 if (unlikely(ret)) 823 pr_err("Failed to submit get_feature command %d error: %d\n", 824 feature_id, ret); 825 826 return ret; 827 } 828 829 static int ena_com_get_feature(struct ena_com_dev *ena_dev, 830 struct ena_admin_get_feat_resp *get_resp, 831 enum ena_admin_aq_feature_id feature_id) 832 { 833 return ena_com_get_feature_ex(ena_dev, 834 get_resp, 835 feature_id, 836 0, 837 0); 838 } 839 840 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) 841 { 842 struct ena_rss *rss = &ena_dev->rss; 843 844 rss->hash_key = 845 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 846 &rss->hash_key_dma_addr, GFP_KERNEL); 847 848 if (unlikely(!rss->hash_key)) 849 return -ENOMEM; 850 851 return 0; 852 } 853 854 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) 855 { 856 struct ena_rss *rss = &ena_dev->rss; 857 858 if (rss->hash_key) 859 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 860 rss->hash_key, rss->hash_key_dma_addr); 861 rss->hash_key = NULL; 862 } 863 864 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) 865 { 866 struct ena_rss *rss = &ena_dev->rss; 867 868 rss->hash_ctrl = 869 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 870 &rss->hash_ctrl_dma_addr, GFP_KERNEL); 871 872 if (unlikely(!rss->hash_ctrl)) 873 return -ENOMEM; 874 875 return 0; 876 } 877 878 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) 879 { 880 struct ena_rss *rss = &ena_dev->rss; 881 882 if (rss->hash_ctrl) 883 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 884 rss->hash_ctrl, rss->hash_ctrl_dma_addr); 885 rss->hash_ctrl = NULL; 886 } 887 888 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, 889 u16 log_size) 890 { 891 struct ena_rss *rss = &ena_dev->rss; 892 struct ena_admin_get_feat_resp get_resp; 893 size_t tbl_size; 894 int ret; 895 896 ret = ena_com_get_feature(ena_dev, &get_resp, 897 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); 898 if (unlikely(ret)) 899 return ret; 900 901 if ((get_resp.u.ind_table.min_size > log_size) || 902 (get_resp.u.ind_table.max_size < log_size)) { 903 pr_err("indirect table size doesn't fit. 
requested size: %d while min is:%d and max %d\n", 904 1 << log_size, 1 << get_resp.u.ind_table.min_size, 905 1 << get_resp.u.ind_table.max_size); 906 return -EINVAL; 907 } 908 909 tbl_size = (1ULL << log_size) * 910 sizeof(struct ena_admin_rss_ind_table_entry); 911 912 rss->rss_ind_tbl = 913 dma_zalloc_coherent(ena_dev->dmadev, tbl_size, 914 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 915 if (unlikely(!rss->rss_ind_tbl)) 916 goto mem_err1; 917 918 tbl_size = (1ULL << log_size) * sizeof(u16); 919 rss->host_rss_ind_tbl = 920 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); 921 if (unlikely(!rss->host_rss_ind_tbl)) 922 goto mem_err2; 923 924 rss->tbl_log_size = log_size; 925 926 return 0; 927 928 mem_err2: 929 tbl_size = (1ULL << log_size) * 930 sizeof(struct ena_admin_rss_ind_table_entry); 931 932 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, 933 rss->rss_ind_tbl_dma_addr); 934 rss->rss_ind_tbl = NULL; 935 mem_err1: 936 rss->tbl_log_size = 0; 937 return -ENOMEM; 938 } 939 940 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) 941 { 942 struct ena_rss *rss = &ena_dev->rss; 943 size_t tbl_size = (1ULL << rss->tbl_log_size) * 944 sizeof(struct ena_admin_rss_ind_table_entry); 945 946 if (rss->rss_ind_tbl) 947 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, 948 rss->rss_ind_tbl_dma_addr); 949 rss->rss_ind_tbl = NULL; 950 951 if (rss->host_rss_ind_tbl) 952 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl); 953 rss->host_rss_ind_tbl = NULL; 954 } 955 956 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, 957 struct ena_com_io_sq *io_sq, u16 cq_idx) 958 { 959 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 960 struct ena_admin_aq_create_sq_cmd create_cmd; 961 struct ena_admin_acq_create_sq_resp_desc cmd_completion; 962 u8 direction; 963 int ret; 964 965 memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd)); 966 967 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; 968 969 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 970 direction = ENA_ADMIN_SQ_DIRECTION_TX; 971 else 972 direction = ENA_ADMIN_SQ_DIRECTION_RX; 973 974 create_cmd.sq_identity |= (direction << 975 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & 976 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; 977 978 create_cmd.sq_caps_2 |= io_sq->mem_queue_type & 979 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; 980 981 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << 982 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & 983 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; 984 985 create_cmd.sq_caps_3 |= 986 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; 987 988 create_cmd.cq_idx = cq_idx; 989 create_cmd.sq_depth = io_sq->q_depth; 990 991 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 992 ret = ena_com_mem_addr_set(ena_dev, 993 &create_cmd.sq_ba, 994 io_sq->desc_addr.phys_addr); 995 if (unlikely(ret)) { 996 pr_err("memory address set failed\n"); 997 return ret; 998 } 999 } 1000 1001 ret = ena_com_execute_admin_command(admin_queue, 1002 (struct ena_admin_aq_entry *)&create_cmd, 1003 sizeof(create_cmd), 1004 (struct ena_admin_acq_entry *)&cmd_completion, 1005 sizeof(cmd_completion)); 1006 if (unlikely(ret)) { 1007 pr_err("Failed to create IO SQ. 
error: %d\n", ret); 1008 return ret; 1009 } 1010 1011 io_sq->idx = cmd_completion.sq_idx; 1012 1013 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1014 (uintptr_t)cmd_completion.sq_doorbell_offset); 1015 1016 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1017 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar 1018 + cmd_completion.llq_headers_offset); 1019 1020 io_sq->desc_addr.pbuf_dev_addr = 1021 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + 1022 cmd_completion.llq_descriptors_offset); 1023 } 1024 1025 pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); 1026 1027 return ret; 1028 } 1029 1030 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) 1031 { 1032 struct ena_rss *rss = &ena_dev->rss; 1033 struct ena_com_io_sq *io_sq; 1034 u16 qid; 1035 int i; 1036 1037 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1038 qid = rss->host_rss_ind_tbl[i]; 1039 if (qid >= ENA_TOTAL_NUM_QUEUES) 1040 return -EINVAL; 1041 1042 io_sq = &ena_dev->io_sq_queues[qid]; 1043 1044 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) 1045 return -EINVAL; 1046 1047 rss->rss_ind_tbl[i].cq_idx = io_sq->idx; 1048 } 1049 1050 return 0; 1051 } 1052 1053 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) 1054 { 1055 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 }; 1056 struct ena_rss *rss = &ena_dev->rss; 1057 u8 idx; 1058 u16 i; 1059 1060 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) 1061 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; 1062 1063 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1064 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES) 1065 return -EINVAL; 1066 idx = (u8)rss->rss_ind_tbl[i].cq_idx; 1067 1068 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) 1069 return -EINVAL; 1070 1071 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx]; 1072 } 1073 1074 return 0; 1075 } 1076 1077 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) 1078 { 1079 size_t size; 1080 1081 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS; 1082 1083 ena_dev->intr_moder_tbl = 1084 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 1085 if (!ena_dev->intr_moder_tbl) 1086 return -ENOMEM; 1087 1088 ena_com_config_default_interrupt_moderation_table(ena_dev); 1089 1090 return 0; 1091 } 1092 1093 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, 1094 u16 intr_delay_resolution) 1095 { 1096 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 1097 unsigned int i; 1098 1099 if (!intr_delay_resolution) { 1100 pr_err("Illegal intr_delay_resolution provided. 
Going to use default 1 usec resolution\n"); 1101 intr_delay_resolution = 1; 1102 } 1103 ena_dev->intr_delay_resolution = intr_delay_resolution; 1104 1105 /* update Rx */ 1106 for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++) 1107 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution; 1108 1109 /* update Tx */ 1110 ena_dev->intr_moder_tx_interval /= intr_delay_resolution; 1111 } 1112 1113 /*****************************************************************************/ 1114 /******************************* API ******************************/ 1115 /*****************************************************************************/ 1116 1117 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, 1118 struct ena_admin_aq_entry *cmd, 1119 size_t cmd_size, 1120 struct ena_admin_acq_entry *comp, 1121 size_t comp_size) 1122 { 1123 struct ena_comp_ctx *comp_ctx; 1124 int ret; 1125 1126 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, 1127 comp, comp_size); 1128 if (unlikely(IS_ERR(comp_ctx))) { 1129 if (comp_ctx == ERR_PTR(-ENODEV)) 1130 pr_debug("Failed to submit command [%ld]\n", 1131 PTR_ERR(comp_ctx)); 1132 else 1133 pr_err("Failed to submit command [%ld]\n", 1134 PTR_ERR(comp_ctx)); 1135 1136 return PTR_ERR(comp_ctx); 1137 } 1138 1139 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); 1140 if (unlikely(ret)) { 1141 if (admin_queue->running_state) 1142 pr_err("Failed to process command. ret = %d\n", ret); 1143 else 1144 pr_debug("Failed to process command. ret = %d\n", ret); 1145 } 1146 return ret; 1147 } 1148 1149 int ena_com_create_io_cq(struct ena_com_dev *ena_dev, 1150 struct ena_com_io_cq *io_cq) 1151 { 1152 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1153 struct ena_admin_aq_create_cq_cmd create_cmd; 1154 struct ena_admin_acq_create_cq_resp_desc cmd_completion; 1155 int ret; 1156 1157 memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd)); 1158 1159 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; 1160 1161 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & 1162 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; 1163 create_cmd.cq_caps_1 |= 1164 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; 1165 1166 create_cmd.msix_vector = io_cq->msix_vector; 1167 create_cmd.cq_depth = io_cq->q_depth; 1168 1169 ret = ena_com_mem_addr_set(ena_dev, 1170 &create_cmd.cq_ba, 1171 io_cq->cdesc_addr.phys_addr); 1172 if (unlikely(ret)) { 1173 pr_err("memory address set failed\n"); 1174 return ret; 1175 } 1176 1177 ret = ena_com_execute_admin_command(admin_queue, 1178 (struct ena_admin_aq_entry *)&create_cmd, 1179 sizeof(create_cmd), 1180 (struct ena_admin_acq_entry *)&cmd_completion, 1181 sizeof(cmd_completion)); 1182 if (unlikely(ret)) { 1183 pr_err("Failed to create IO CQ. 
error: %d\n", ret); 1184 return ret; 1185 } 1186 1187 io_cq->idx = cmd_completion.cq_idx; 1188 1189 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1190 cmd_completion.cq_interrupt_unmask_register_offset); 1191 1192 if (cmd_completion.cq_head_db_register_offset) 1193 io_cq->cq_head_db_reg = 1194 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1195 cmd_completion.cq_head_db_register_offset); 1196 1197 if (cmd_completion.numa_node_register_offset) 1198 io_cq->numa_node_cfg_reg = 1199 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1200 cmd_completion.numa_node_register_offset); 1201 1202 pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); 1203 1204 return ret; 1205 } 1206 1207 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 1208 struct ena_com_io_sq **io_sq, 1209 struct ena_com_io_cq **io_cq) 1210 { 1211 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1212 pr_err("Invalid queue number %d but the max is %d\n", qid, 1213 ENA_TOTAL_NUM_QUEUES); 1214 return -EINVAL; 1215 } 1216 1217 *io_sq = &ena_dev->io_sq_queues[qid]; 1218 *io_cq = &ena_dev->io_cq_queues[qid]; 1219 1220 return 0; 1221 } 1222 1223 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) 1224 { 1225 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1226 struct ena_comp_ctx *comp_ctx; 1227 u16 i; 1228 1229 if (!admin_queue->comp_ctx) 1230 return; 1231 1232 for (i = 0; i < admin_queue->q_depth; i++) { 1233 comp_ctx = get_comp_ctxt(admin_queue, i, false); 1234 if (unlikely(!comp_ctx)) 1235 break; 1236 1237 comp_ctx->status = ENA_CMD_ABORTED; 1238 1239 complete(&comp_ctx->wait_event); 1240 } 1241 } 1242 1243 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) 1244 { 1245 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1246 unsigned long flags; 1247 1248 spin_lock_irqsave(&admin_queue->q_lock, flags); 1249 while (atomic_read(&admin_queue->outstanding_cmds) != 0) { 1250 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 1251 msleep(20); 1252 spin_lock_irqsave(&admin_queue->q_lock, flags); 1253 } 1254 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 1255 } 1256 1257 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, 1258 struct ena_com_io_cq *io_cq) 1259 { 1260 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1261 struct ena_admin_aq_destroy_cq_cmd destroy_cmd; 1262 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; 1263 int ret; 1264 1265 memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); 1266 1267 destroy_cmd.cq_idx = io_cq->idx; 1268 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; 1269 1270 ret = ena_com_execute_admin_command(admin_queue, 1271 (struct ena_admin_aq_entry *)&destroy_cmd, 1272 sizeof(destroy_cmd), 1273 (struct ena_admin_acq_entry *)&destroy_resp, 1274 sizeof(destroy_resp)); 1275 1276 if (unlikely(ret && (ret != -ENODEV))) 1277 pr_err("Failed to destroy IO CQ. 
error: %d\n", ret); 1278 1279 return ret; 1280 } 1281 1282 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) 1283 { 1284 return ena_dev->admin_queue.running_state; 1285 } 1286 1287 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) 1288 { 1289 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1290 unsigned long flags; 1291 1292 spin_lock_irqsave(&admin_queue->q_lock, flags); 1293 ena_dev->admin_queue.running_state = state; 1294 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 1295 } 1296 1297 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) 1298 { 1299 u16 depth = ena_dev->aenq.q_depth; 1300 1301 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); 1302 1303 /* Init head_db to mark that all entries in the queue 1304 * are initially available 1305 */ 1306 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1307 } 1308 1309 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) 1310 { 1311 struct ena_com_admin_queue *admin_queue; 1312 struct ena_admin_set_feat_cmd cmd; 1313 struct ena_admin_set_feat_resp resp; 1314 struct ena_admin_get_feat_resp get_resp; 1315 int ret; 1316 1317 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG); 1318 if (ret) { 1319 pr_info("Can't get aenq configuration\n"); 1320 return ret; 1321 } 1322 1323 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { 1324 pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", 1325 get_resp.u.aenq.supported_groups, groups_flag); 1326 return -EPERM; 1327 } 1328 1329 memset(&cmd, 0x0, sizeof(cmd)); 1330 admin_queue = &ena_dev->admin_queue; 1331 1332 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 1333 cmd.aq_common_descriptor.flags = 0; 1334 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; 1335 cmd.u.aenq.enabled_groups = groups_flag; 1336 1337 ret = ena_com_execute_admin_command(admin_queue, 1338 (struct ena_admin_aq_entry *)&cmd, 1339 sizeof(cmd), 1340 (struct ena_admin_acq_entry *)&resp, 1341 sizeof(resp)); 1342 1343 if (unlikely(ret)) 1344 pr_err("Failed to config AENQ ret: %d\n", ret); 1345 1346 return ret; 1347 } 1348 1349 int ena_com_get_dma_width(struct ena_com_dev *ena_dev) 1350 { 1351 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 1352 int width; 1353 1354 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { 1355 pr_err("Reg read timeout occurred\n"); 1356 return -ETIME; 1357 } 1358 1359 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> 1360 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; 1361 1362 pr_debug("ENA dma width: %d\n", width); 1363 1364 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { 1365 pr_err("DMA width illegal value: %d\n", width); 1366 return -EINVAL; 1367 } 1368 1369 ena_dev->dma_addr_bits = width; 1370 1371 return width; 1372 } 1373 1374 int ena_com_validate_version(struct ena_com_dev *ena_dev) 1375 { 1376 u32 ver; 1377 u32 ctrl_ver; 1378 u32 ctrl_ver_masked; 1379 1380 /* Make sure the ENA version and the controller version are at least 1381 * as the driver expects 1382 */ 1383 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); 1384 ctrl_ver = ena_com_reg_bar_read32(ena_dev, 1385 ENA_REGS_CONTROLLER_VERSION_OFF); 1386 1387 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || 1388 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { 1389 pr_err("Reg read timeout occurred\n"); 1390 return -ETIME; 1391 } 1392 1393 pr_info("ena device version: %d.%d\n", 1394 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> 1395 
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, 1396 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); 1397 1398 if (ver < MIN_ENA_VER) { 1399 pr_err("ENA version is lower than the minimal version the driver supports\n"); 1400 return -1; 1401 } 1402 1403 pr_info("ena controller version: %d.%d.%d implementation version %d\n", 1404 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> 1405 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, 1406 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> 1407 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, 1408 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), 1409 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> 1410 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); 1411 1412 ctrl_ver_masked = 1413 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | 1414 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | 1415 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); 1416 1417 /* Validate the ctrl version without the implementation ID */ 1418 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { 1419 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); 1420 return -1; 1421 } 1422 1423 return 0; 1424 } 1425 1426 void ena_com_admin_destroy(struct ena_com_dev *ena_dev) 1427 { 1428 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1429 struct ena_com_admin_cq *cq = &admin_queue->cq; 1430 struct ena_com_admin_sq *sq = &admin_queue->sq; 1431 struct ena_com_aenq *aenq = &ena_dev->aenq; 1432 u16 size; 1433 1434 if (admin_queue->comp_ctx) 1435 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx); 1436 admin_queue->comp_ctx = NULL; 1437 size = ADMIN_SQ_SIZE(admin_queue->q_depth); 1438 if (sq->entries) 1439 dma_free_coherent(ena_dev->dmadev, size, sq->entries, 1440 sq->dma_addr); 1441 sq->entries = NULL; 1442 1443 size = ADMIN_CQ_SIZE(admin_queue->q_depth); 1444 if (cq->entries) 1445 dma_free_coherent(ena_dev->dmadev, size, cq->entries, 1446 cq->dma_addr); 1447 cq->entries = NULL; 1448 1449 size = ADMIN_AENQ_SIZE(aenq->q_depth); 1450 if (ena_dev->aenq.entries) 1451 dma_free_coherent(ena_dev->dmadev, size, aenq->entries, 1452 aenq->dma_addr); 1453 aenq->entries = NULL; 1454 } 1455 1456 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) 1457 { 1458 ena_dev->admin_queue.polling = polling; 1459 } 1460 1461 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) 1462 { 1463 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1464 1465 spin_lock_init(&mmio_read->lock); 1466 mmio_read->read_resp = 1467 dma_zalloc_coherent(ena_dev->dmadev, 1468 sizeof(*mmio_read->read_resp), 1469 &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1470 if (unlikely(!mmio_read->read_resp)) 1471 return -ENOMEM; 1472 1473 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 1474 1475 mmio_read->read_resp->req_id = 0x0; 1476 mmio_read->seq_num = 0x0; 1477 mmio_read->readless_supported = true; 1478 1479 return 0; 1480 } 1481 1482 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) 1483 { 1484 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1485 1486 mmio_read->readless_supported = readless_supported; 1487 } 1488 1489 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) 1490 { 1491 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1492 1493 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1494 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1495 1496 
dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), 1497 mmio_read->read_resp, mmio_read->read_resp_dma_addr); 1498 1499 mmio_read->read_resp = NULL; 1500 } 1501 1502 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) 1503 { 1504 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1505 u32 addr_low, addr_high; 1506 1507 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); 1508 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); 1509 1510 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1511 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1512 } 1513 1514 int ena_com_admin_init(struct ena_com_dev *ena_dev, 1515 struct ena_aenq_handlers *aenq_handlers, 1516 bool init_spinlock) 1517 { 1518 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1519 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; 1520 int ret; 1521 1522 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 1523 1524 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { 1525 pr_err("Reg read timeout occurred\n"); 1526 return -ETIME; 1527 } 1528 1529 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { 1530 pr_err("Device isn't ready, abort com init\n"); 1531 return -ENODEV; 1532 } 1533 1534 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; 1535 1536 admin_queue->q_dmadev = ena_dev->dmadev; 1537 admin_queue->polling = false; 1538 admin_queue->curr_cmd_id = 0; 1539 1540 atomic_set(&admin_queue->outstanding_cmds, 0); 1541 1542 if (init_spinlock) 1543 spin_lock_init(&admin_queue->q_lock); 1544 1545 ret = ena_com_init_comp_ctxt(admin_queue); 1546 if (ret) 1547 goto error; 1548 1549 ret = ena_com_admin_init_sq(admin_queue); 1550 if (ret) 1551 goto error; 1552 1553 ret = ena_com_admin_init_cq(admin_queue); 1554 if (ret) 1555 goto error; 1556 1557 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1558 ENA_REGS_AQ_DB_OFF); 1559 1560 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); 1561 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); 1562 1563 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); 1564 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); 1565 1566 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); 1567 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); 1568 1569 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); 1570 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); 1571 1572 aq_caps = 0; 1573 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; 1574 aq_caps |= (sizeof(struct ena_admin_aq_entry) << 1575 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & 1576 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; 1577 1578 acq_caps = 0; 1579 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; 1580 acq_caps |= (sizeof(struct ena_admin_acq_entry) << 1581 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & 1582 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; 1583 1584 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); 1585 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); 1586 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); 1587 if (ret) 1588 goto error; 1589 1590 admin_queue->running_state = true; 1591 1592 return 0; 1593 error: 1594 ena_com_admin_destroy(ena_dev); 1595 1596 return ret; 1597 } 1598 1599 int ena_com_create_io_queue(struct ena_com_dev *ena_dev, 1600 struct ena_com_create_io_ctx *ctx) 1601 { 1602 struct ena_com_io_sq *io_sq; 1603 struct 
ena_com_io_cq *io_cq; 1604 int ret; 1605 1606 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { 1607 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", 1608 ctx->qid, ENA_TOTAL_NUM_QUEUES); 1609 return -EINVAL; 1610 } 1611 1612 io_sq = &ena_dev->io_sq_queues[ctx->qid]; 1613 io_cq = &ena_dev->io_cq_queues[ctx->qid]; 1614 1615 memset(io_sq, 0x0, sizeof(struct ena_com_io_sq)); 1616 memset(io_cq, 0x0, sizeof(struct ena_com_io_cq)); 1617 1618 /* Init CQ */ 1619 io_cq->q_depth = ctx->queue_size; 1620 io_cq->direction = ctx->direction; 1621 io_cq->qid = ctx->qid; 1622 1623 io_cq->msix_vector = ctx->msix_vector; 1624 1625 io_sq->q_depth = ctx->queue_size; 1626 io_sq->direction = ctx->direction; 1627 io_sq->qid = ctx->qid; 1628 1629 io_sq->mem_queue_type = ctx->mem_queue_type; 1630 1631 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1632 /* header length is limited to 8 bits */ 1633 io_sq->tx_max_header_size = 1634 min_t(u32, ena_dev->tx_max_header_size, SZ_256); 1635 1636 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); 1637 if (ret) 1638 goto error; 1639 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); 1640 if (ret) 1641 goto error; 1642 1643 ret = ena_com_create_io_cq(ena_dev, io_cq); 1644 if (ret) 1645 goto error; 1646 1647 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); 1648 if (ret) 1649 goto destroy_io_cq; 1650 1651 return 0; 1652 1653 destroy_io_cq: 1654 ena_com_destroy_io_cq(ena_dev, io_cq); 1655 error: 1656 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 1657 return ret; 1658 } 1659 1660 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) 1661 { 1662 struct ena_com_io_sq *io_sq; 1663 struct ena_com_io_cq *io_cq; 1664 1665 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1666 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid, 1667 ENA_TOTAL_NUM_QUEUES); 1668 return; 1669 } 1670 1671 io_sq = &ena_dev->io_sq_queues[qid]; 1672 io_cq = &ena_dev->io_cq_queues[qid]; 1673 1674 ena_com_destroy_io_sq(ena_dev, io_sq); 1675 ena_com_destroy_io_cq(ena_dev, io_cq); 1676 1677 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 1678 } 1679 1680 int ena_com_get_link_params(struct ena_com_dev *ena_dev, 1681 struct ena_admin_get_feat_resp *resp) 1682 { 1683 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG); 1684 } 1685 1686 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, 1687 struct ena_com_dev_get_features_ctx *get_feat_ctx) 1688 { 1689 struct ena_admin_get_feat_resp get_resp; 1690 int rc; 1691 1692 rc = ena_com_get_feature(ena_dev, &get_resp, 1693 ENA_ADMIN_DEVICE_ATTRIBUTES); 1694 if (rc) 1695 return rc; 1696 1697 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, 1698 sizeof(get_resp.u.dev_attr)); 1699 ena_dev->supported_features = get_resp.u.dev_attr.supported_features; 1700 1701 rc = ena_com_get_feature(ena_dev, &get_resp, 1702 ENA_ADMIN_MAX_QUEUES_NUM); 1703 if (rc) 1704 return rc; 1705 1706 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, 1707 sizeof(get_resp.u.max_queue)); 1708 ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size; 1709 1710 rc = ena_com_get_feature(ena_dev, &get_resp, 1711 ENA_ADMIN_AENQ_CONFIG); 1712 if (rc) 1713 return rc; 1714 1715 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, 1716 sizeof(get_resp.u.aenq)); 1717 1718 rc = ena_com_get_feature(ena_dev, &get_resp, 1719 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); 1720 if (rc) 1721 return rc; 1722 1723 memcpy(&get_feat_ctx->offload, &get_resp.u.offload, 1724 sizeof(get_resp.u.offload)); 1725 1726 return 0; 1727 } 1728 1729 void ena_com_admin_q_comp_intr_handler(struct 
ena_com_dev *ena_dev) 1730 { 1731 ena_com_handle_admin_completion(&ena_dev->admin_queue); 1732 } 1733 1734 /* ena_handle_specific_aenq_event: 1735 * return the handler that is relevant to the specific event group 1736 */ 1737 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev, 1738 u16 group) 1739 { 1740 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers; 1741 1742 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) 1743 return aenq_handlers->handlers[group]; 1744 1745 return aenq_handlers->unimplemented_handler; 1746 } 1747 1748 /* ena_aenq_intr_handler: 1749 * handles the aenq incoming events. 1750 * pop events from the queue and apply the specific handler 1751 */ 1752 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) 1753 { 1754 struct ena_admin_aenq_entry *aenq_e; 1755 struct ena_admin_aenq_common_desc *aenq_common; 1756 struct ena_com_aenq *aenq = &dev->aenq; 1757 ena_aenq_handler handler_cb; 1758 u16 masked_head, processed = 0; 1759 u8 phase; 1760 1761 masked_head = aenq->head & (aenq->q_depth - 1); 1762 phase = aenq->phase; 1763 aenq_e = &aenq->entries[masked_head]; /* Get first entry */ 1764 aenq_common = &aenq_e->aenq_common_desc; 1765 1766 /* Go over all the events */ 1767 while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == 1768 phase) { 1769 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", 1770 aenq_common->group, aenq_common->syndrom, 1771 (u64)aenq_common->timestamp_low + 1772 ((u64)aenq_common->timestamp_high << 32)); 1773 1774 /* Handle specific event*/ 1775 handler_cb = ena_com_get_specific_aenq_cb(dev, 1776 aenq_common->group); 1777 handler_cb(data, aenq_e); /* call the actual event handler*/ 1778 1779 /* Get next event entry */ 1780 masked_head++; 1781 processed++; 1782 1783 if (unlikely(masked_head == aenq->q_depth)) { 1784 masked_head = 0; 1785 phase = !phase; 1786 } 1787 aenq_e = &aenq->entries[masked_head]; 1788 aenq_common = &aenq_e->aenq_common_desc; 1789 } 1790 1791 aenq->head += processed; 1792 aenq->phase = phase; 1793 1794 /* Don't update aenq doorbell if there weren't any processed events */ 1795 if (!processed) 1796 return; 1797 1798 /* write the aenq doorbell after all AENQ descriptors were read */ 1799 mb(); 1800 writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1801 } 1802 1803 int ena_com_dev_reset(struct ena_com_dev *ena_dev) 1804 { 1805 u32 stat, timeout, cap, reset_val; 1806 int rc; 1807 1808 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 1809 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 1810 1811 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || 1812 (cap == ENA_MMIO_READ_TIMEOUT))) { 1813 pr_err("Reg read32 timeout occurred\n"); 1814 return -ETIME; 1815 } 1816 1817 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { 1818 pr_err("Device isn't ready, can't reset device\n"); 1819 return -EINVAL; 1820 } 1821 1822 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> 1823 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; 1824 if (timeout == 0) { 1825 pr_err("Invalid timeout value\n"); 1826 return -EINVAL; 1827 } 1828 1829 /* start reset */ 1830 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; 1831 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 1832 1833 /* Write again the MMIO read request address */ 1834 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 1835 1836 rc = wait_for_reset_state(ena_dev, timeout, 1837 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); 1838 if (rc != 0) { 1839 pr_err("Reset indication didn't turn 
on\n"); 1840 return rc; 1841 } 1842 1843 /* reset done */ 1844 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 1845 rc = wait_for_reset_state(ena_dev, timeout, 0); 1846 if (rc != 0) { 1847 pr_err("Reset indication didn't turn off\n"); 1848 return rc; 1849 } 1850 1851 return 0; 1852 } 1853 1854 static int ena_get_dev_stats(struct ena_com_dev *ena_dev, 1855 struct ena_com_stats_ctx *ctx, 1856 enum ena_admin_get_stats_type type) 1857 { 1858 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; 1859 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; 1860 struct ena_com_admin_queue *admin_queue; 1861 int ret; 1862 1863 admin_queue = &ena_dev->admin_queue; 1864 1865 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; 1866 get_cmd->aq_common_descriptor.flags = 0; 1867 get_cmd->type = type; 1868 1869 ret = ena_com_execute_admin_command(admin_queue, 1870 (struct ena_admin_aq_entry *)get_cmd, 1871 sizeof(*get_cmd), 1872 (struct ena_admin_acq_entry *)get_resp, 1873 sizeof(*get_resp)); 1874 1875 if (unlikely(ret)) 1876 pr_err("Failed to get stats. error: %d\n", ret); 1877 1878 return ret; 1879 } 1880 1881 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, 1882 struct ena_admin_basic_stats *stats) 1883 { 1884 struct ena_com_stats_ctx ctx; 1885 int ret; 1886 1887 memset(&ctx, 0x0, sizeof(ctx)); 1888 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); 1889 if (likely(ret == 0)) 1890 memcpy(stats, &ctx.get_resp.basic_stats, 1891 sizeof(ctx.get_resp.basic_stats)); 1892 1893 return ret; 1894 } 1895 1896 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) 1897 { 1898 struct ena_com_admin_queue *admin_queue; 1899 struct ena_admin_set_feat_cmd cmd; 1900 struct ena_admin_set_feat_resp resp; 1901 int ret; 1902 1903 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { 1904 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU); 1905 return -EPERM; 1906 } 1907 1908 memset(&cmd, 0x0, sizeof(cmd)); 1909 admin_queue = &ena_dev->admin_queue; 1910 1911 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 1912 cmd.aq_common_descriptor.flags = 0; 1913 cmd.feat_common.feature_id = ENA_ADMIN_MTU; 1914 cmd.u.mtu.mtu = mtu; 1915 1916 ret = ena_com_execute_admin_command(admin_queue, 1917 (struct ena_admin_aq_entry *)&cmd, 1918 sizeof(cmd), 1919 (struct ena_admin_acq_entry *)&resp, 1920 sizeof(resp)); 1921 1922 if (unlikely(ret)) 1923 pr_err("Failed to set mtu %d. 
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EPERM;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}
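/* Note (added commentary): ena_com_set_hash_function() is an example of the
 * "indirect control buffer" pattern used by several set-feature commands in
 * this file: the command itself only carries the CTRL_DATA_INDIRECT flag plus
 * the DMA address/length of a host buffer (here rss->hash_key), and the
 * device fetches the actual payload from that buffer. The buffer therefore
 * has to stay DMA-mapped until the admin command completes.
 */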
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EPERM;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}
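/* Note (added commentary): a hypothetical caller configuring Toeplitz hashing
 * would pass a DWORD-multiple key plus an init value, e.g. (sketch only; the
 * "toeplitz_key" array is not part of this file):
 *
 *	static const u8 toeplitz_key[40] = { 0x6d, 0x5a, ... };
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					toeplitz_key, sizeof(toeplitz_key),
 *					0xffffffff);
 *
 * On failure the function re-reads the previously selected function from the
 * device, so the cached rss state stays consistent.
 */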
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EPERM;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
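/* Note (added commentary): the defaults above select the classic 4-tuple
 * (L3 src/dst address + L4 src/dst port) for TCP and UDP over IPv4/IPv6, fall
 * back to the L3 address pair for other IP traffic (including IPv4 fragments,
 * which have no reliable L4 header), and hash on the L2 source/destination
 * MACs for non-IP frames. Any protocol whose desired fields are not all
 * reported as supported by the device causes the whole configuration to be
 * rejected.
 */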
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EPERM;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
				    sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}
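/* Note (added commentary): the host-side table is filled entry by entry and
 * only pushed to the device by ena_com_indirect_table_set(). A hypothetical
 * round-robin spread over "num_queues" RX queues could look like this
 * (sketch only; num_queues and log_size are caller state, not part of this
 * file):
 *
 *	for (i = 0; i < (1 << log_size); i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i, i % num_queues);
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */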
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		   sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:

	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}
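/* Note (added commentary): the host-info page and the debug area are plain
 * coherent DMA buffers owned by ena_com. The expected lifecycle, as far as
 * this file is concerned, is allocate -> fill by the caller ->
 * ena_com_set_host_attributes() below (which hands both DMA addresses to the
 * device) -> delete on teardown. Freeing a buffer while the device still
 * holds its address is something the caller has to avoid.
 */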
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;

	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
					  ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EPERM) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}
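/* Note (added commentary): intervals handed to the device are expressed in
 * units of intr_delay_resolution, which the device reports through the
 * interrupt moderation feature. For example, with a (hypothetical) resolution
 * of 2 us, a requested rx_coalesce_usecs of 64 is stored as 64 / 2 = 32
 * device units; the reverse multiplication happens when an entry is read
 * back. A resolution of zero means the device never reported the feature,
 * which is why the update helpers above reject the call in that case.
 */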
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (intr_moder_tbl)
		return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;

	return 0;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}
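/* Note (added commentary): the moderation table has five levels
 * (LOWEST/LOW/MID/HIGH/HIGHEST); adaptive moderation picks a level based on
 * observed packet and byte rates, while the LOWEST entry doubles as the
 * storage slot for the nonadaptive RX interval (see the getter above).
 * Entries written through ena_com_init_intr_moderation_entry() are converted
 * into device units on the way in; the matching get helper below converts
 * them back to microseconds.
 */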
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}
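/* Note (added commentary): a hypothetical ethtool-style round trip through
 * the two helpers above (sketch only; "entry" is a caller-owned local):
 *
 *	struct ena_intr_moder_entry entry;
 *
 *	ena_com_get_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
 *	entry.intr_moder_interval = 96;		// microseconds
 *	ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
 */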