/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_POLL_MS	5

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

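	/* The address fits the device's reported DMA width and has been split
	 * into the low/high parts of ena_common_mem_addr; the high part is
	 * truncated to 16 bits to match the descriptor field's width.
	 */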
return 0; 107 } 108 109 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue) 110 { 111 struct ena_com_admin_sq *sq = &queue->sq; 112 u16 size = ADMIN_SQ_SIZE(queue->q_depth); 113 114 sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr, 115 GFP_KERNEL); 116 117 if (!sq->entries) { 118 pr_err("memory allocation failed\n"); 119 return -ENOMEM; 120 } 121 122 sq->head = 0; 123 sq->tail = 0; 124 sq->phase = 1; 125 126 sq->db_addr = NULL; 127 128 return 0; 129 } 130 131 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue) 132 { 133 struct ena_com_admin_cq *cq = &queue->cq; 134 u16 size = ADMIN_CQ_SIZE(queue->q_depth); 135 136 cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr, 137 GFP_KERNEL); 138 139 if (!cq->entries) { 140 pr_err("memory allocation failed\n"); 141 return -ENOMEM; 142 } 143 144 cq->head = 0; 145 cq->phase = 1; 146 147 return 0; 148 } 149 150 static int ena_com_admin_init_aenq(struct ena_com_dev *dev, 151 struct ena_aenq_handlers *aenq_handlers) 152 { 153 struct ena_com_aenq *aenq = &dev->aenq; 154 u32 addr_low, addr_high, aenq_caps; 155 u16 size; 156 157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 159 aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr, 160 GFP_KERNEL); 161 162 if (!aenq->entries) { 163 pr_err("memory allocation failed\n"); 164 return -ENOMEM; 165 } 166 167 aenq->head = aenq->q_depth; 168 aenq->phase = 1; 169 170 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); 171 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); 172 173 writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); 174 writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); 175 176 aenq_caps = 0; 177 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; 178 aenq_caps |= (sizeof(struct ena_admin_aenq_entry) 179 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & 180 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; 181 writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); 182 183 if (unlikely(!aenq_handlers)) { 184 pr_err("aenq handlers pointer is NULL\n"); 185 return -EINVAL; 186 } 187 188 aenq->aenq_handlers = aenq_handlers; 189 190 return 0; 191 } 192 193 static void comp_ctxt_release(struct ena_com_admin_queue *queue, 194 struct ena_comp_ctx *comp_ctx) 195 { 196 comp_ctx->occupied = false; 197 atomic_dec(&queue->outstanding_cmds); 198 } 199 200 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, 201 u16 command_id, bool capture) 202 { 203 if (unlikely(command_id >= queue->q_depth)) { 204 pr_err("command id is larger than the queue size. 
cmd_id: %u queue size %d\n", 205 command_id, queue->q_depth); 206 return NULL; 207 } 208 209 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { 210 pr_err("Completion context is occupied\n"); 211 return NULL; 212 } 213 214 if (capture) { 215 atomic_inc(&queue->outstanding_cmds); 216 queue->comp_ctx[command_id].occupied = true; 217 } 218 219 return &queue->comp_ctx[command_id]; 220 } 221 222 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 223 struct ena_admin_aq_entry *cmd, 224 size_t cmd_size_in_bytes, 225 struct ena_admin_acq_entry *comp, 226 size_t comp_size_in_bytes) 227 { 228 struct ena_comp_ctx *comp_ctx; 229 u16 tail_masked, cmd_id; 230 u16 queue_size_mask; 231 u16 cnt; 232 233 queue_size_mask = admin_queue->q_depth - 1; 234 235 tail_masked = admin_queue->sq.tail & queue_size_mask; 236 237 /* In case of queue FULL */ 238 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); 239 if (cnt >= admin_queue->q_depth) { 240 pr_debug("admin queue is full.\n"); 241 admin_queue->stats.out_of_space++; 242 return ERR_PTR(-ENOSPC); 243 } 244 245 cmd_id = admin_queue->curr_cmd_id; 246 247 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & 248 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; 249 250 cmd->aq_common_descriptor.command_id |= cmd_id & 251 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; 252 253 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); 254 if (unlikely(!comp_ctx)) 255 return ERR_PTR(-EINVAL); 256 257 comp_ctx->status = ENA_CMD_SUBMITTED; 258 comp_ctx->comp_size = (u32)comp_size_in_bytes; 259 comp_ctx->user_cqe = comp; 260 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; 261 262 reinit_completion(&comp_ctx->wait_event); 263 264 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); 265 266 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & 267 queue_size_mask; 268 269 admin_queue->sq.tail++; 270 admin_queue->stats.submitted_cmd++; 271 272 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) 273 admin_queue->sq.phase = !admin_queue->sq.phase; 274 275 writel(admin_queue->sq.tail, admin_queue->sq.db_addr); 276 277 return comp_ctx; 278 } 279 280 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) 281 { 282 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); 283 struct ena_comp_ctx *comp_ctx; 284 u16 i; 285 286 queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL); 287 if (unlikely(!queue->comp_ctx)) { 288 pr_err("memory allocation failed\n"); 289 return -ENOMEM; 290 } 291 292 for (i = 0; i < queue->q_depth; i++) { 293 comp_ctx = get_comp_ctxt(queue, i, false); 294 if (comp_ctx) 295 init_completion(&comp_ctx->wait_event); 296 } 297 298 return 0; 299 } 300 301 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 302 struct ena_admin_aq_entry *cmd, 303 size_t cmd_size_in_bytes, 304 struct ena_admin_acq_entry *comp, 305 size_t comp_size_in_bytes) 306 { 307 unsigned long flags = 0; 308 struct ena_comp_ctx *comp_ctx; 309 310 spin_lock_irqsave(&admin_queue->q_lock, flags); 311 if (unlikely(!admin_queue->running_state)) { 312 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 313 return ERR_PTR(-ENODEV); 314 } 315 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, 316 cmd_size_in_bytes, 317 comp, 318 comp_size_in_bytes); 319 if (IS_ERR(comp_ctx)) 320 admin_queue->running_state = false; 321 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 322 323 return comp_ctx; 324 } 325 326 static int 
ena_com_init_io_sq(struct ena_com_dev *ena_dev, 327 struct ena_com_create_io_ctx *ctx, 328 struct ena_com_io_sq *io_sq) 329 { 330 size_t size; 331 int dev_node = 0; 332 333 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); 334 335 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; 336 io_sq->desc_entry_size = 337 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 338 sizeof(struct ena_eth_io_tx_desc) : 339 sizeof(struct ena_eth_io_rx_desc); 340 341 size = io_sq->desc_entry_size * io_sq->q_depth; 342 343 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 344 dev_node = dev_to_node(ena_dev->dmadev); 345 set_dev_node(ena_dev->dmadev, ctx->numa_node); 346 io_sq->desc_addr.virt_addr = 347 dma_alloc_coherent(ena_dev->dmadev, size, 348 &io_sq->desc_addr.phys_addr, 349 GFP_KERNEL); 350 set_dev_node(ena_dev->dmadev, dev_node); 351 if (!io_sq->desc_addr.virt_addr) { 352 io_sq->desc_addr.virt_addr = 353 dma_alloc_coherent(ena_dev->dmadev, size, 354 &io_sq->desc_addr.phys_addr, 355 GFP_KERNEL); 356 } 357 358 if (!io_sq->desc_addr.virt_addr) { 359 pr_err("memory allocation failed\n"); 360 return -ENOMEM; 361 } 362 } 363 364 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 365 /* Allocate bounce buffers */ 366 io_sq->bounce_buf_ctrl.buffer_size = 367 ena_dev->llq_info.desc_list_entry_size; 368 io_sq->bounce_buf_ctrl.buffers_num = 369 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; 370 io_sq->bounce_buf_ctrl.next_to_use = 0; 371 372 size = io_sq->bounce_buf_ctrl.buffer_size * 373 io_sq->bounce_buf_ctrl.buffers_num; 374 375 dev_node = dev_to_node(ena_dev->dmadev); 376 set_dev_node(ena_dev->dmadev, ctx->numa_node); 377 io_sq->bounce_buf_ctrl.base_buffer = 378 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 379 set_dev_node(ena_dev->dmadev, dev_node); 380 if (!io_sq->bounce_buf_ctrl.base_buffer) 381 io_sq->bounce_buf_ctrl.base_buffer = 382 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 383 384 if (!io_sq->bounce_buf_ctrl.base_buffer) { 385 pr_err("bounce buffer memory allocation failed\n"); 386 return -ENOMEM; 387 } 388 389 memcpy(&io_sq->llq_info, &ena_dev->llq_info, 390 sizeof(io_sq->llq_info)); 391 392 /* Initiate the first bounce buffer */ 393 io_sq->llq_buf_ctrl.curr_bounce_buf = 394 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); 395 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, 396 0x0, io_sq->llq_info.desc_list_entry_size); 397 io_sq->llq_buf_ctrl.descs_left_in_line = 398 io_sq->llq_info.descs_num_before_header; 399 400 if (io_sq->llq_info.max_entries_in_tx_burst > 0) 401 io_sq->entries_in_tx_burst_left = 402 io_sq->llq_info.max_entries_in_tx_burst; 403 } 404 405 io_sq->tail = 0; 406 io_sq->next_to_comp = 0; 407 io_sq->phase = 1; 408 409 return 0; 410 } 411 412 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, 413 struct ena_com_create_io_ctx *ctx, 414 struct ena_com_io_cq *io_cq) 415 { 416 size_t size; 417 int prev_node = 0; 418 419 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); 420 421 /* Use the basic completion descriptor for Rx */ 422 io_cq->cdesc_entry_size_in_bytes = 423 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
424 sizeof(struct ena_eth_io_tx_cdesc) : 425 sizeof(struct ena_eth_io_rx_cdesc_base); 426 427 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; 428 429 prev_node = dev_to_node(ena_dev->dmadev); 430 set_dev_node(ena_dev->dmadev, ctx->numa_node); 431 io_cq->cdesc_addr.virt_addr = 432 dma_alloc_coherent(ena_dev->dmadev, size, 433 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 434 set_dev_node(ena_dev->dmadev, prev_node); 435 if (!io_cq->cdesc_addr.virt_addr) { 436 io_cq->cdesc_addr.virt_addr = 437 dma_alloc_coherent(ena_dev->dmadev, size, 438 &io_cq->cdesc_addr.phys_addr, 439 GFP_KERNEL); 440 } 441 442 if (!io_cq->cdesc_addr.virt_addr) { 443 pr_err("memory allocation failed\n"); 444 return -ENOMEM; 445 } 446 447 io_cq->phase = 1; 448 io_cq->head = 0; 449 450 return 0; 451 } 452 453 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, 454 struct ena_admin_acq_entry *cqe) 455 { 456 struct ena_comp_ctx *comp_ctx; 457 u16 cmd_id; 458 459 cmd_id = cqe->acq_common_descriptor.command & 460 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; 461 462 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); 463 if (unlikely(!comp_ctx)) { 464 pr_err("comp_ctx is NULL. Changing the admin queue running state\n"); 465 admin_queue->running_state = false; 466 return; 467 } 468 469 comp_ctx->status = ENA_CMD_COMPLETED; 470 comp_ctx->comp_status = cqe->acq_common_descriptor.status; 471 472 if (comp_ctx->user_cqe) 473 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); 474 475 if (!admin_queue->polling) 476 complete(&comp_ctx->wait_event); 477 } 478 479 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) 480 { 481 struct ena_admin_acq_entry *cqe = NULL; 482 u16 comp_num = 0; 483 u16 head_masked; 484 u8 phase; 485 486 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); 487 phase = admin_queue->cq.phase; 488 489 cqe = &admin_queue->cq.entries[head_masked]; 490 491 /* Go over all the completions */ 492 while ((READ_ONCE(cqe->acq_common_descriptor.flags) & 493 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 494 /* Do not read the rest of the completion entry before the 495 * phase bit was validated 496 */ 497 dma_rmb(); 498 ena_com_handle_single_admin_completion(admin_queue, cqe); 499 500 head_masked++; 501 comp_num++; 502 if (unlikely(head_masked == admin_queue->q_depth)) { 503 head_masked = 0; 504 phase = !phase; 505 } 506 507 cqe = &admin_queue->cq.entries[head_masked]; 508 } 509 510 admin_queue->cq.head += comp_num; 511 admin_queue->cq.phase = phase; 512 admin_queue->sq.head += comp_num; 513 admin_queue->stats.completed_cmd += comp_num; 514 } 515 516 static int ena_com_comp_status_to_errno(u8 comp_status) 517 { 518 if (unlikely(comp_status != 0)) 519 pr_err("admin command failed[%u]\n", comp_status); 520 521 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR)) 522 return -EINVAL; 523 524 switch (comp_status) { 525 case ENA_ADMIN_SUCCESS: 526 return 0; 527 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: 528 return -ENOMEM; 529 case ENA_ADMIN_UNSUPPORTED_OPCODE: 530 return -EOPNOTSUPP; 531 case ENA_ADMIN_BAD_OPCODE: 532 case ENA_ADMIN_MALFORMED_REQUEST: 533 case ENA_ADMIN_ILLEGAL_PARAMETER: 534 case ENA_ADMIN_UNKNOWN_ERROR: 535 return -EINVAL; 536 } 537 538 return 0; 539 } 540 541 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, 542 struct ena_com_admin_queue *admin_queue) 543 { 544 unsigned long flags = 0; 545 unsigned long timeout; 546 int ret; 547 548 timeout = jiffies + 
usecs_to_jiffies(admin_queue->completion_timeout); 549 550 while (1) { 551 spin_lock_irqsave(&admin_queue->q_lock, flags); 552 ena_com_handle_admin_completion(admin_queue); 553 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 554 555 if (comp_ctx->status != ENA_CMD_SUBMITTED) 556 break; 557 558 if (time_is_before_jiffies(timeout)) { 559 pr_err("Wait for completion (polling) timeout\n"); 560 /* ENA didn't have any completion */ 561 spin_lock_irqsave(&admin_queue->q_lock, flags); 562 admin_queue->stats.no_completion++; 563 admin_queue->running_state = false; 564 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 565 566 ret = -ETIME; 567 goto err; 568 } 569 570 msleep(ENA_POLL_MS); 571 } 572 573 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { 574 pr_err("Command was aborted\n"); 575 spin_lock_irqsave(&admin_queue->q_lock, flags); 576 admin_queue->stats.aborted_cmd++; 577 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 578 ret = -ENODEV; 579 goto err; 580 } 581 582 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", 583 comp_ctx->status); 584 585 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); 586 err: 587 comp_ctxt_release(admin_queue, comp_ctx); 588 return ret; 589 } 590 591 /** 592 * Set the LLQ configurations of the firmware 593 * 594 * The driver provides only the enabled feature values to the device, 595 * which in turn, checks if they are supported. 596 */ 597 static int ena_com_set_llq(struct ena_com_dev *ena_dev) 598 { 599 struct ena_com_admin_queue *admin_queue; 600 struct ena_admin_set_feat_cmd cmd; 601 struct ena_admin_set_feat_resp resp; 602 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 603 int ret; 604 605 memset(&cmd, 0x0, sizeof(cmd)); 606 admin_queue = &ena_dev->admin_queue; 607 608 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 609 cmd.feat_common.feature_id = ENA_ADMIN_LLQ; 610 611 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; 612 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; 613 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; 614 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; 615 616 ret = ena_com_execute_admin_command(admin_queue, 617 (struct ena_admin_aq_entry *)&cmd, 618 sizeof(cmd), 619 (struct ena_admin_acq_entry *)&resp, 620 sizeof(resp)); 621 622 if (unlikely(ret)) 623 pr_err("Failed to set LLQ configurations: %d\n", ret); 624 625 return ret; 626 } 627 628 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, 629 struct ena_admin_feature_llq_desc *llq_features, 630 struct ena_llq_configurations *llq_default_cfg) 631 { 632 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 633 u16 supported_feat; 634 int rc; 635 636 memset(llq_info, 0, sizeof(*llq_info)); 637 638 supported_feat = llq_features->header_location_ctrl_supported; 639 640 if (likely(supported_feat & llq_default_cfg->llq_header_location)) { 641 llq_info->header_location_ctrl = 642 llq_default_cfg->llq_header_location; 643 } else { 644 pr_err("Invalid header location control, supported: 0x%x\n", 645 supported_feat); 646 return -EINVAL; 647 } 648 649 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { 650 supported_feat = llq_features->descriptors_stride_ctrl_supported; 651 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { 652 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; 653 } else { 654 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { 655 
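				/* The requested stride control is not supported; fall back to
				 * a mode the device does advertise, preferring multiple
				 * descriptors per LLQ entry.
				 */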
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
				       supported_feat);
				return -EINVAL;
			}

			pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			       llq_default_cfg->llq_stride_ctrl, supported_feat,
			       llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_ring_entry_size, supported_feat,
		       llq_info->desc_list_entry_size);
	}
	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from __iowrite64_copy()
		 */
		pr_err("illegal entry size %d\n",
		       llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_num_decs_before_header,
		       supported_feat, llq_info->descs_num_before_header);
	}

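	/* Derive how many LLQ entries fit in a single TX burst from the
	 * device-reported max burst size and the configured entry size.
	 */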
	llq_info->max_entries_in_tx_burst =
		(u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		pr_err("Cannot set LLQ configuration: %d\n", rc);

	return rc;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(
					    admin_queue->completion_timeout));

	/* In case the command wasn't completed, find out the root cause.
	 * There might be two kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't get the MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
			       comp_ctx->cmd_opcode,
			       admin_queue->auto_polling ? "ON" : "OFF");
			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);
		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an interrupt
		 * and autopolling mode is enabled. Continue normal execution in that case.
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = -ETIME;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads a hardware device register by posting a write
 * and waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	might_sleep();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}

/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the interrupt handler calls
 * ena_com_handle_admin_completion to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
				       ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
				      ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_sq->desc_addr.virt_addr,
				  io_sq->desc_addr.phys_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
	timeout = (timeout * 100) / ENA_POLL_MS;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return
-ETIME; 952 } 953 954 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == 955 exp_state) 956 return 0; 957 958 msleep(ENA_POLL_MS); 959 } 960 961 return -ETIME; 962 } 963 964 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, 965 enum ena_admin_aq_feature_id feature_id) 966 { 967 u32 feature_mask = 1 << feature_id; 968 969 /* Device attributes is always supported */ 970 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && 971 !(ena_dev->supported_features & feature_mask)) 972 return false; 973 974 return true; 975 } 976 977 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, 978 struct ena_admin_get_feat_resp *get_resp, 979 enum ena_admin_aq_feature_id feature_id, 980 dma_addr_t control_buf_dma_addr, 981 u32 control_buff_size, 982 u8 feature_ver) 983 { 984 struct ena_com_admin_queue *admin_queue; 985 struct ena_admin_get_feat_cmd get_cmd; 986 int ret; 987 988 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { 989 pr_debug("Feature %d isn't supported\n", feature_id); 990 return -EOPNOTSUPP; 991 } 992 993 memset(&get_cmd, 0x0, sizeof(get_cmd)); 994 admin_queue = &ena_dev->admin_queue; 995 996 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; 997 998 if (control_buff_size) 999 get_cmd.aq_common_descriptor.flags = 1000 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 1001 else 1002 get_cmd.aq_common_descriptor.flags = 0; 1003 1004 ret = ena_com_mem_addr_set(ena_dev, 1005 &get_cmd.control_buffer.address, 1006 control_buf_dma_addr); 1007 if (unlikely(ret)) { 1008 pr_err("memory address set failed\n"); 1009 return ret; 1010 } 1011 1012 get_cmd.control_buffer.length = control_buff_size; 1013 get_cmd.feat_common.feature_version = feature_ver; 1014 get_cmd.feat_common.feature_id = feature_id; 1015 1016 ret = ena_com_execute_admin_command(admin_queue, 1017 (struct ena_admin_aq_entry *) 1018 &get_cmd, 1019 sizeof(get_cmd), 1020 (struct ena_admin_acq_entry *) 1021 get_resp, 1022 sizeof(*get_resp)); 1023 1024 if (unlikely(ret)) 1025 pr_err("Failed to submit get_feature command %d error: %d\n", 1026 feature_id, ret); 1027 1028 return ret; 1029 } 1030 1031 static int ena_com_get_feature(struct ena_com_dev *ena_dev, 1032 struct ena_admin_get_feat_resp *get_resp, 1033 enum ena_admin_aq_feature_id feature_id, 1034 u8 feature_ver) 1035 { 1036 return ena_com_get_feature_ex(ena_dev, 1037 get_resp, 1038 feature_id, 1039 0, 1040 0, 1041 feature_ver); 1042 } 1043 1044 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) 1045 { 1046 struct ena_rss *rss = &ena_dev->rss; 1047 1048 rss->hash_key = 1049 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1050 &rss->hash_key_dma_addr, GFP_KERNEL); 1051 1052 if (unlikely(!rss->hash_key)) 1053 return -ENOMEM; 1054 1055 return 0; 1056 } 1057 1058 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) 1059 { 1060 struct ena_rss *rss = &ena_dev->rss; 1061 1062 if (rss->hash_key) 1063 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1064 rss->hash_key, rss->hash_key_dma_addr); 1065 rss->hash_key = NULL; 1066 } 1067 1068 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) 1069 { 1070 struct ena_rss *rss = &ena_dev->rss; 1071 1072 rss->hash_ctrl = 1073 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1074 &rss->hash_ctrl_dma_addr, GFP_KERNEL); 1075 1076 if (unlikely(!rss->hash_ctrl)) 1077 return -ENOMEM; 1078 1079 return 0; 1080 } 1081 1082 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) 1083 { 1084 struct ena_rss *rss = 
&ena_dev->rss; 1085 1086 if (rss->hash_ctrl) 1087 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1088 rss->hash_ctrl, rss->hash_ctrl_dma_addr); 1089 rss->hash_ctrl = NULL; 1090 } 1091 1092 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, 1093 u16 log_size) 1094 { 1095 struct ena_rss *rss = &ena_dev->rss; 1096 struct ena_admin_get_feat_resp get_resp; 1097 size_t tbl_size; 1098 int ret; 1099 1100 ret = ena_com_get_feature(ena_dev, &get_resp, 1101 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0); 1102 if (unlikely(ret)) 1103 return ret; 1104 1105 if ((get_resp.u.ind_table.min_size > log_size) || 1106 (get_resp.u.ind_table.max_size < log_size)) { 1107 pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n", 1108 1 << log_size, 1 << get_resp.u.ind_table.min_size, 1109 1 << get_resp.u.ind_table.max_size); 1110 return -EINVAL; 1111 } 1112 1113 tbl_size = (1ULL << log_size) * 1114 sizeof(struct ena_admin_rss_ind_table_entry); 1115 1116 rss->rss_ind_tbl = 1117 dma_alloc_coherent(ena_dev->dmadev, tbl_size, 1118 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 1119 if (unlikely(!rss->rss_ind_tbl)) 1120 goto mem_err1; 1121 1122 tbl_size = (1ULL << log_size) * sizeof(u16); 1123 rss->host_rss_ind_tbl = 1124 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); 1125 if (unlikely(!rss->host_rss_ind_tbl)) 1126 goto mem_err2; 1127 1128 rss->tbl_log_size = log_size; 1129 1130 return 0; 1131 1132 mem_err2: 1133 tbl_size = (1ULL << log_size) * 1134 sizeof(struct ena_admin_rss_ind_table_entry); 1135 1136 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, 1137 rss->rss_ind_tbl_dma_addr); 1138 rss->rss_ind_tbl = NULL; 1139 mem_err1: 1140 rss->tbl_log_size = 0; 1141 return -ENOMEM; 1142 } 1143 1144 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) 1145 { 1146 struct ena_rss *rss = &ena_dev->rss; 1147 size_t tbl_size = (1ULL << rss->tbl_log_size) * 1148 sizeof(struct ena_admin_rss_ind_table_entry); 1149 1150 if (rss->rss_ind_tbl) 1151 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, 1152 rss->rss_ind_tbl_dma_addr); 1153 rss->rss_ind_tbl = NULL; 1154 1155 if (rss->host_rss_ind_tbl) 1156 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl); 1157 rss->host_rss_ind_tbl = NULL; 1158 } 1159 1160 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, 1161 struct ena_com_io_sq *io_sq, u16 cq_idx) 1162 { 1163 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1164 struct ena_admin_aq_create_sq_cmd create_cmd; 1165 struct ena_admin_acq_create_sq_resp_desc cmd_completion; 1166 u8 direction; 1167 int ret; 1168 1169 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1170 1171 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; 1172 1173 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1174 direction = ENA_ADMIN_SQ_DIRECTION_TX; 1175 else 1176 direction = ENA_ADMIN_SQ_DIRECTION_RX; 1177 1178 create_cmd.sq_identity |= (direction << 1179 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & 1180 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; 1181 1182 create_cmd.sq_caps_2 |= io_sq->mem_queue_type & 1183 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; 1184 1185 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << 1186 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & 1187 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; 1188 1189 create_cmd.sq_caps_3 |= 1190 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; 1191 1192 create_cmd.cq_idx = cq_idx; 1193 create_cmd.sq_depth = 
io_sq->q_depth; 1194 1195 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 1196 ret = ena_com_mem_addr_set(ena_dev, 1197 &create_cmd.sq_ba, 1198 io_sq->desc_addr.phys_addr); 1199 if (unlikely(ret)) { 1200 pr_err("memory address set failed\n"); 1201 return ret; 1202 } 1203 } 1204 1205 ret = ena_com_execute_admin_command(admin_queue, 1206 (struct ena_admin_aq_entry *)&create_cmd, 1207 sizeof(create_cmd), 1208 (struct ena_admin_acq_entry *)&cmd_completion, 1209 sizeof(cmd_completion)); 1210 if (unlikely(ret)) { 1211 pr_err("Failed to create IO SQ. error: %d\n", ret); 1212 return ret; 1213 } 1214 1215 io_sq->idx = cmd_completion.sq_idx; 1216 1217 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1218 (uintptr_t)cmd_completion.sq_doorbell_offset); 1219 1220 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1221 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar 1222 + cmd_completion.llq_headers_offset); 1223 1224 io_sq->desc_addr.pbuf_dev_addr = 1225 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + 1226 cmd_completion.llq_descriptors_offset); 1227 } 1228 1229 pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); 1230 1231 return ret; 1232 } 1233 1234 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) 1235 { 1236 struct ena_rss *rss = &ena_dev->rss; 1237 struct ena_com_io_sq *io_sq; 1238 u16 qid; 1239 int i; 1240 1241 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1242 qid = rss->host_rss_ind_tbl[i]; 1243 if (qid >= ENA_TOTAL_NUM_QUEUES) 1244 return -EINVAL; 1245 1246 io_sq = &ena_dev->io_sq_queues[qid]; 1247 1248 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) 1249 return -EINVAL; 1250 1251 rss->rss_ind_tbl[i].cq_idx = io_sq->idx; 1252 } 1253 1254 return 0; 1255 } 1256 1257 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) 1258 { 1259 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 }; 1260 struct ena_rss *rss = &ena_dev->rss; 1261 u8 idx; 1262 u16 i; 1263 1264 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) 1265 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; 1266 1267 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1268 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES) 1269 return -EINVAL; 1270 idx = (u8)rss->rss_ind_tbl[i].cq_idx; 1271 1272 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) 1273 return -EINVAL; 1274 1275 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx]; 1276 } 1277 1278 return 0; 1279 } 1280 1281 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) 1282 { 1283 size_t size; 1284 1285 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS; 1286 1287 ena_dev->intr_moder_tbl = 1288 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 1289 if (!ena_dev->intr_moder_tbl) 1290 return -ENOMEM; 1291 1292 ena_com_config_default_interrupt_moderation_table(ena_dev); 1293 1294 return 0; 1295 } 1296 1297 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, 1298 u16 intr_delay_resolution) 1299 { 1300 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 1301 unsigned int i; 1302 1303 if (!intr_delay_resolution) { 1304 pr_err("Illegal intr_delay_resolution provided. 
Going to use default 1 usec resolution\n"); 1305 intr_delay_resolution = 1; 1306 } 1307 ena_dev->intr_delay_resolution = intr_delay_resolution; 1308 1309 /* update Rx */ 1310 for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++) 1311 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution; 1312 1313 /* update Tx */ 1314 ena_dev->intr_moder_tx_interval /= intr_delay_resolution; 1315 } 1316 1317 /*****************************************************************************/ 1318 /******************************* API ******************************/ 1319 /*****************************************************************************/ 1320 1321 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, 1322 struct ena_admin_aq_entry *cmd, 1323 size_t cmd_size, 1324 struct ena_admin_acq_entry *comp, 1325 size_t comp_size) 1326 { 1327 struct ena_comp_ctx *comp_ctx; 1328 int ret; 1329 1330 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, 1331 comp, comp_size); 1332 if (IS_ERR(comp_ctx)) { 1333 if (comp_ctx == ERR_PTR(-ENODEV)) 1334 pr_debug("Failed to submit command [%ld]\n", 1335 PTR_ERR(comp_ctx)); 1336 else 1337 pr_err("Failed to submit command [%ld]\n", 1338 PTR_ERR(comp_ctx)); 1339 1340 return PTR_ERR(comp_ctx); 1341 } 1342 1343 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); 1344 if (unlikely(ret)) { 1345 if (admin_queue->running_state) 1346 pr_err("Failed to process command. ret = %d\n", ret); 1347 else 1348 pr_debug("Failed to process command. ret = %d\n", ret); 1349 } 1350 return ret; 1351 } 1352 1353 int ena_com_create_io_cq(struct ena_com_dev *ena_dev, 1354 struct ena_com_io_cq *io_cq) 1355 { 1356 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1357 struct ena_admin_aq_create_cq_cmd create_cmd; 1358 struct ena_admin_acq_create_cq_resp_desc cmd_completion; 1359 int ret; 1360 1361 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1362 1363 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; 1364 1365 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & 1366 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; 1367 create_cmd.cq_caps_1 |= 1368 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; 1369 1370 create_cmd.msix_vector = io_cq->msix_vector; 1371 create_cmd.cq_depth = io_cq->q_depth; 1372 1373 ret = ena_com_mem_addr_set(ena_dev, 1374 &create_cmd.cq_ba, 1375 io_cq->cdesc_addr.phys_addr); 1376 if (unlikely(ret)) { 1377 pr_err("memory address set failed\n"); 1378 return ret; 1379 } 1380 1381 ret = ena_com_execute_admin_command(admin_queue, 1382 (struct ena_admin_aq_entry *)&create_cmd, 1383 sizeof(create_cmd), 1384 (struct ena_admin_acq_entry *)&cmd_completion, 1385 sizeof(cmd_completion)); 1386 if (unlikely(ret)) { 1387 pr_err("Failed to create IO CQ. 
error: %d\n", ret); 1388 return ret; 1389 } 1390 1391 io_cq->idx = cmd_completion.cq_idx; 1392 1393 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1394 cmd_completion.cq_interrupt_unmask_register_offset); 1395 1396 if (cmd_completion.cq_head_db_register_offset) 1397 io_cq->cq_head_db_reg = 1398 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1399 cmd_completion.cq_head_db_register_offset); 1400 1401 if (cmd_completion.numa_node_register_offset) 1402 io_cq->numa_node_cfg_reg = 1403 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1404 cmd_completion.numa_node_register_offset); 1405 1406 pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); 1407 1408 return ret; 1409 } 1410 1411 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 1412 struct ena_com_io_sq **io_sq, 1413 struct ena_com_io_cq **io_cq) 1414 { 1415 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1416 pr_err("Invalid queue number %d but the max is %d\n", qid, 1417 ENA_TOTAL_NUM_QUEUES); 1418 return -EINVAL; 1419 } 1420 1421 *io_sq = &ena_dev->io_sq_queues[qid]; 1422 *io_cq = &ena_dev->io_cq_queues[qid]; 1423 1424 return 0; 1425 } 1426 1427 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) 1428 { 1429 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1430 struct ena_comp_ctx *comp_ctx; 1431 u16 i; 1432 1433 if (!admin_queue->comp_ctx) 1434 return; 1435 1436 for (i = 0; i < admin_queue->q_depth; i++) { 1437 comp_ctx = get_comp_ctxt(admin_queue, i, false); 1438 if (unlikely(!comp_ctx)) 1439 break; 1440 1441 comp_ctx->status = ENA_CMD_ABORTED; 1442 1443 complete(&comp_ctx->wait_event); 1444 } 1445 } 1446 1447 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) 1448 { 1449 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1450 unsigned long flags = 0; 1451 1452 spin_lock_irqsave(&admin_queue->q_lock, flags); 1453 while (atomic_read(&admin_queue->outstanding_cmds) != 0) { 1454 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 1455 msleep(ENA_POLL_MS); 1456 spin_lock_irqsave(&admin_queue->q_lock, flags); 1457 } 1458 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 1459 } 1460 1461 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, 1462 struct ena_com_io_cq *io_cq) 1463 { 1464 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1465 struct ena_admin_aq_destroy_cq_cmd destroy_cmd; 1466 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; 1467 int ret; 1468 1469 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); 1470 1471 destroy_cmd.cq_idx = io_cq->idx; 1472 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; 1473 1474 ret = ena_com_execute_admin_command(admin_queue, 1475 (struct ena_admin_aq_entry *)&destroy_cmd, 1476 sizeof(destroy_cmd), 1477 (struct ena_admin_acq_entry *)&destroy_resp, 1478 sizeof(destroy_resp)); 1479 1480 if (unlikely(ret && (ret != -ENODEV))) 1481 pr_err("Failed to destroy IO CQ. 
error: %d\n", ret); 1482 1483 return ret; 1484 } 1485 1486 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) 1487 { 1488 return ena_dev->admin_queue.running_state; 1489 } 1490 1491 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) 1492 { 1493 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1494 unsigned long flags = 0; 1495 1496 spin_lock_irqsave(&admin_queue->q_lock, flags); 1497 ena_dev->admin_queue.running_state = state; 1498 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 1499 } 1500 1501 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) 1502 { 1503 u16 depth = ena_dev->aenq.q_depth; 1504 1505 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); 1506 1507 /* Init head_db to mark that all entries in the queue 1508 * are initially available 1509 */ 1510 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1511 } 1512 1513 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) 1514 { 1515 struct ena_com_admin_queue *admin_queue; 1516 struct ena_admin_set_feat_cmd cmd; 1517 struct ena_admin_set_feat_resp resp; 1518 struct ena_admin_get_feat_resp get_resp; 1519 int ret; 1520 1521 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0); 1522 if (ret) { 1523 pr_info("Can't get aenq configuration\n"); 1524 return ret; 1525 } 1526 1527 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { 1528 pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", 1529 get_resp.u.aenq.supported_groups, groups_flag); 1530 return -EOPNOTSUPP; 1531 } 1532 1533 memset(&cmd, 0x0, sizeof(cmd)); 1534 admin_queue = &ena_dev->admin_queue; 1535 1536 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 1537 cmd.aq_common_descriptor.flags = 0; 1538 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; 1539 cmd.u.aenq.enabled_groups = groups_flag; 1540 1541 ret = ena_com_execute_admin_command(admin_queue, 1542 (struct ena_admin_aq_entry *)&cmd, 1543 sizeof(cmd), 1544 (struct ena_admin_acq_entry *)&resp, 1545 sizeof(resp)); 1546 1547 if (unlikely(ret)) 1548 pr_err("Failed to config AENQ ret: %d\n", ret); 1549 1550 return ret; 1551 } 1552 1553 int ena_com_get_dma_width(struct ena_com_dev *ena_dev) 1554 { 1555 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 1556 int width; 1557 1558 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { 1559 pr_err("Reg read timeout occurred\n"); 1560 return -ETIME; 1561 } 1562 1563 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> 1564 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; 1565 1566 pr_debug("ENA dma width: %d\n", width); 1567 1568 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { 1569 pr_err("DMA width illegal value: %d\n", width); 1570 return -EINVAL; 1571 } 1572 1573 ena_dev->dma_addr_bits = width; 1574 1575 return width; 1576 } 1577 1578 int ena_com_validate_version(struct ena_com_dev *ena_dev) 1579 { 1580 u32 ver; 1581 u32 ctrl_ver; 1582 u32 ctrl_ver_masked; 1583 1584 /* Make sure the ENA version and the controller version are at least 1585 * as the driver expects 1586 */ 1587 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); 1588 ctrl_ver = ena_com_reg_bar_read32(ena_dev, 1589 ENA_REGS_CONTROLLER_VERSION_OFF); 1590 1591 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || 1592 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { 1593 pr_err("Reg read timeout occurred\n"); 1594 return -ETIME; 1595 } 1596 1597 pr_info("ena device version: %d.%d\n", 1598 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> 
1599 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, 1600 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); 1601 1602 pr_info("ena controller version: %d.%d.%d implementation version %d\n", 1603 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> 1604 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, 1605 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> 1606 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, 1607 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), 1608 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> 1609 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); 1610 1611 ctrl_ver_masked = 1612 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | 1613 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | 1614 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); 1615 1616 /* Validate the ctrl version without the implementation ID */ 1617 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { 1618 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); 1619 return -1; 1620 } 1621 1622 return 0; 1623 } 1624 1625 void ena_com_admin_destroy(struct ena_com_dev *ena_dev) 1626 { 1627 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1628 struct ena_com_admin_cq *cq = &admin_queue->cq; 1629 struct ena_com_admin_sq *sq = &admin_queue->sq; 1630 struct ena_com_aenq *aenq = &ena_dev->aenq; 1631 u16 size; 1632 1633 if (admin_queue->comp_ctx) 1634 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx); 1635 admin_queue->comp_ctx = NULL; 1636 size = ADMIN_SQ_SIZE(admin_queue->q_depth); 1637 if (sq->entries) 1638 dma_free_coherent(ena_dev->dmadev, size, sq->entries, 1639 sq->dma_addr); 1640 sq->entries = NULL; 1641 1642 size = ADMIN_CQ_SIZE(admin_queue->q_depth); 1643 if (cq->entries) 1644 dma_free_coherent(ena_dev->dmadev, size, cq->entries, 1645 cq->dma_addr); 1646 cq->entries = NULL; 1647 1648 size = ADMIN_AENQ_SIZE(aenq->q_depth); 1649 if (ena_dev->aenq.entries) 1650 dma_free_coherent(ena_dev->dmadev, size, aenq->entries, 1651 aenq->dma_addr); 1652 aenq->entries = NULL; 1653 } 1654 1655 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) 1656 { 1657 u32 mask_value = 0; 1658 1659 if (polling) 1660 mask_value = ENA_REGS_ADMIN_INTR_MASK; 1661 1662 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); 1663 ena_dev->admin_queue.polling = polling; 1664 } 1665 1666 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, 1667 bool polling) 1668 { 1669 ena_dev->admin_queue.auto_polling = polling; 1670 } 1671 1672 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) 1673 { 1674 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1675 1676 spin_lock_init(&mmio_read->lock); 1677 mmio_read->read_resp = 1678 dma_alloc_coherent(ena_dev->dmadev, 1679 sizeof(*mmio_read->read_resp), 1680 &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1681 if (unlikely(!mmio_read->read_resp)) 1682 goto err; 1683 1684 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 1685 1686 mmio_read->read_resp->req_id = 0x0; 1687 mmio_read->seq_num = 0x0; 1688 mmio_read->readless_supported = true; 1689 1690 return 0; 1691 1692 err: 1693 1694 return -ENOMEM; 1695 } 1696 1697 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) 1698 { 1699 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1700 1701 mmio_read->readless_supported = readless_supported; 1702 } 1703 1704 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) 1705 { 1706 
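	/* Clear the device's readless-response address registers before
	 * freeing the response buffer, so the device stops posting read
	 * responses into memory that is about to be released.
	 */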
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1707 1708 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1709 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1710 1711 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), 1712 mmio_read->read_resp, mmio_read->read_resp_dma_addr); 1713 1714 mmio_read->read_resp = NULL; 1715 } 1716 1717 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) 1718 { 1719 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1720 u32 addr_low, addr_high; 1721 1722 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); 1723 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); 1724 1725 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1726 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1727 } 1728 1729 int ena_com_admin_init(struct ena_com_dev *ena_dev, 1730 struct ena_aenq_handlers *aenq_handlers) 1731 { 1732 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1733 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; 1734 int ret; 1735 1736 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 1737 1738 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { 1739 pr_err("Reg read timeout occurred\n"); 1740 return -ETIME; 1741 } 1742 1743 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { 1744 pr_err("Device isn't ready, abort com init\n"); 1745 return -ENODEV; 1746 } 1747 1748 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; 1749 1750 admin_queue->q_dmadev = ena_dev->dmadev; 1751 admin_queue->polling = false; 1752 admin_queue->curr_cmd_id = 0; 1753 1754 atomic_set(&admin_queue->outstanding_cmds, 0); 1755 1756 spin_lock_init(&admin_queue->q_lock); 1757 1758 ret = ena_com_init_comp_ctxt(admin_queue); 1759 if (ret) 1760 goto error; 1761 1762 ret = ena_com_admin_init_sq(admin_queue); 1763 if (ret) 1764 goto error; 1765 1766 ret = ena_com_admin_init_cq(admin_queue); 1767 if (ret) 1768 goto error; 1769 1770 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1771 ENA_REGS_AQ_DB_OFF); 1772 1773 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); 1774 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); 1775 1776 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); 1777 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); 1778 1779 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); 1780 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); 1781 1782 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); 1783 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); 1784 1785 aq_caps = 0; 1786 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; 1787 aq_caps |= (sizeof(struct ena_admin_aq_entry) << 1788 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & 1789 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; 1790 1791 acq_caps = 0; 1792 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; 1793 acq_caps |= (sizeof(struct ena_admin_acq_entry) << 1794 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & 1795 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; 1796 1797 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); 1798 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); 1799 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); 1800 if (ret) 1801 goto error; 1802 1803 admin_queue->running_state = true; 1804 1805 return 0; 1806 error: 1807 ena_com_admin_destroy(ena_dev); 1808 1809 return ret; 1810 } 1811 1812 int 
ena_com_create_io_queue(struct ena_com_dev *ena_dev, 1813 struct ena_com_create_io_ctx *ctx) 1814 { 1815 struct ena_com_io_sq *io_sq; 1816 struct ena_com_io_cq *io_cq; 1817 int ret; 1818 1819 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { 1820 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", 1821 ctx->qid, ENA_TOTAL_NUM_QUEUES); 1822 return -EINVAL; 1823 } 1824 1825 io_sq = &ena_dev->io_sq_queues[ctx->qid]; 1826 io_cq = &ena_dev->io_cq_queues[ctx->qid]; 1827 1828 memset(io_sq, 0x0, sizeof(*io_sq)); 1829 memset(io_cq, 0x0, sizeof(*io_cq)); 1830 1831 /* Init CQ */ 1832 io_cq->q_depth = ctx->queue_size; 1833 io_cq->direction = ctx->direction; 1834 io_cq->qid = ctx->qid; 1835 1836 io_cq->msix_vector = ctx->msix_vector; 1837 1838 io_sq->q_depth = ctx->queue_size; 1839 io_sq->direction = ctx->direction; 1840 io_sq->qid = ctx->qid; 1841 1842 io_sq->mem_queue_type = ctx->mem_queue_type; 1843 1844 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1845 /* header length is limited to 8 bits */ 1846 io_sq->tx_max_header_size = 1847 min_t(u32, ena_dev->tx_max_header_size, SZ_256); 1848 1849 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); 1850 if (ret) 1851 goto error; 1852 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); 1853 if (ret) 1854 goto error; 1855 1856 ret = ena_com_create_io_cq(ena_dev, io_cq); 1857 if (ret) 1858 goto error; 1859 1860 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); 1861 if (ret) 1862 goto destroy_io_cq; 1863 1864 return 0; 1865 1866 destroy_io_cq: 1867 ena_com_destroy_io_cq(ena_dev, io_cq); 1868 error: 1869 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 1870 return ret; 1871 } 1872 1873 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) 1874 { 1875 struct ena_com_io_sq *io_sq; 1876 struct ena_com_io_cq *io_cq; 1877 1878 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1879 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid, 1880 ENA_TOTAL_NUM_QUEUES); 1881 return; 1882 } 1883 1884 io_sq = &ena_dev->io_sq_queues[qid]; 1885 io_cq = &ena_dev->io_cq_queues[qid]; 1886 1887 ena_com_destroy_io_sq(ena_dev, io_sq); 1888 ena_com_destroy_io_cq(ena_dev, io_cq); 1889 1890 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 1891 } 1892 1893 int ena_com_get_link_params(struct ena_com_dev *ena_dev, 1894 struct ena_admin_get_feat_resp *resp) 1895 { 1896 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0); 1897 } 1898 1899 int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev) 1900 { 1901 struct ena_admin_get_feat_resp resp; 1902 struct ena_extra_properties_strings *extra_properties_strings = 1903 &ena_dev->extra_properties_strings; 1904 u32 rc; 1905 1906 extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT * 1907 ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN; 1908 1909 extra_properties_strings->virt_addr = 1910 dma_alloc_coherent(ena_dev->dmadev, 1911 extra_properties_strings->size, 1912 &extra_properties_strings->dma_addr, 1913 GFP_KERNEL); 1914 if (unlikely(!extra_properties_strings->virt_addr)) { 1915 pr_err("Failed to allocate extra properties strings\n"); 1916 return 0; 1917 } 1918 1919 rc = ena_com_get_feature_ex(ena_dev, &resp, 1920 ENA_ADMIN_EXTRA_PROPERTIES_STRINGS, 1921 extra_properties_strings->dma_addr, 1922 extra_properties_strings->size, 0); 1923 if (rc) { 1924 pr_debug("Failed to get extra properties strings\n"); 1925 goto err; 1926 } 1927 1928 return resp.u.extra_properties_strings.count; 1929 err: 1930 ena_com_delete_extra_properties_strings(ena_dev); 1931 return 0; 1932 } 1933 1934 void 
ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
{
	struct ena_extra_properties_strings *extra_properties_strings =
					&ena_dev->extra_properties_strings;

	if (extra_properties_strings->virt_addr) {
		dma_free_coherent(ena_dev->dmadev,
				  extra_properties_strings->size,
				  extra_properties_strings->virt_addr,
				  extra_properties_strings->dma_addr);
		extra_properties_strings->virt_addr = NULL;
	}
}

int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
				       struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp,
				   ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return -EINVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;

		if (rc)
			return rc;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints is not a mandatory admin command, so if the command
	 * isn't supported, set the driver hints to 0.
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_com_aenq_intr_handler:
 * handles the incoming AENQ events.
 * Pop events from the queue and apply the specific handler.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	unsigned long long timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp =
			(unsigned long long)aenq_common->timestamp_low |
			((unsigned long long)aenq_common->timestamp_high << 32);
		pr_debug("AENQ! 
Group[%x] Syndrom[%x] timestamp: [%llus]\n", 2088 aenq_common->group, aenq_common->syndrom, timestamp); 2089 2090 /* Handle specific event*/ 2091 handler_cb = ena_com_get_specific_aenq_cb(dev, 2092 aenq_common->group); 2093 handler_cb(data, aenq_e); /* call the actual event handler*/ 2094 2095 /* Get next event entry */ 2096 masked_head++; 2097 processed++; 2098 2099 if (unlikely(masked_head == aenq->q_depth)) { 2100 masked_head = 0; 2101 phase = !phase; 2102 } 2103 aenq_e = &aenq->entries[masked_head]; 2104 aenq_common = &aenq_e->aenq_common_desc; 2105 } 2106 2107 aenq->head += processed; 2108 aenq->phase = phase; 2109 2110 /* Don't update aenq doorbell if there weren't any processed events */ 2111 if (!processed) 2112 return; 2113 2114 /* write the aenq doorbell after all AENQ descriptors were read */ 2115 mb(); 2116 writel_relaxed((u32)aenq->head, 2117 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 2118 } 2119 2120 int ena_com_dev_reset(struct ena_com_dev *ena_dev, 2121 enum ena_regs_reset_reason_types reset_reason) 2122 { 2123 u32 stat, timeout, cap, reset_val; 2124 int rc; 2125 2126 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 2127 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 2128 2129 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || 2130 (cap == ENA_MMIO_READ_TIMEOUT))) { 2131 pr_err("Reg read32 timeout occurred\n"); 2132 return -ETIME; 2133 } 2134 2135 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { 2136 pr_err("Device isn't ready, can't reset device\n"); 2137 return -EINVAL; 2138 } 2139 2140 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> 2141 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; 2142 if (timeout == 0) { 2143 pr_err("Invalid timeout value\n"); 2144 return -EINVAL; 2145 } 2146 2147 /* start reset */ 2148 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; 2149 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) & 2150 ENA_REGS_DEV_CTL_RESET_REASON_MASK; 2151 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 2152 2153 /* Write again the MMIO read request address */ 2154 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 2155 2156 rc = wait_for_reset_state(ena_dev, timeout, 2157 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); 2158 if (rc != 0) { 2159 pr_err("Reset indication didn't turn on\n"); 2160 return rc; 2161 } 2162 2163 /* reset done */ 2164 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 2165 rc = wait_for_reset_state(ena_dev, timeout, 0); 2166 if (rc != 0) { 2167 pr_err("Reset indication didn't turn off\n"); 2168 return rc; 2169 } 2170 2171 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> 2172 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; 2173 if (timeout) 2174 /* the resolution of timeout reg is 100ms */ 2175 ena_dev->admin_queue.completion_timeout = timeout * 100000; 2176 else 2177 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; 2178 2179 return 0; 2180 } 2181 2182 static int ena_get_dev_stats(struct ena_com_dev *ena_dev, 2183 struct ena_com_stats_ctx *ctx, 2184 enum ena_admin_get_stats_type type) 2185 { 2186 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; 2187 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; 2188 struct ena_com_admin_queue *admin_queue; 2189 int ret; 2190 2191 admin_queue = &ena_dev->admin_queue; 2192 2193 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; 2194 get_cmd->aq_common_descriptor.flags = 0; 2195 get_cmd->type = type; 2196 2197 ret = ena_com_execute_admin_command(admin_queue, 2198 (struct ena_admin_aq_entry *)get_cmd, 2199 sizeof(*get_cmd), 2200 (struct 
ena_admin_acq_entry *)get_resp, 2201 sizeof(*get_resp)); 2202 2203 if (unlikely(ret)) 2204 pr_err("Failed to get stats. error: %d\n", ret); 2205 2206 return ret; 2207 } 2208 2209 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, 2210 struct ena_admin_basic_stats *stats) 2211 { 2212 struct ena_com_stats_ctx ctx; 2213 int ret; 2214 2215 memset(&ctx, 0x0, sizeof(ctx)); 2216 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); 2217 if (likely(ret == 0)) 2218 memcpy(stats, &ctx.get_resp.basic_stats, 2219 sizeof(ctx.get_resp.basic_stats)); 2220 2221 return ret; 2222 } 2223 2224 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) 2225 { 2226 struct ena_com_admin_queue *admin_queue; 2227 struct ena_admin_set_feat_cmd cmd; 2228 struct ena_admin_set_feat_resp resp; 2229 int ret; 2230 2231 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { 2232 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU); 2233 return -EOPNOTSUPP; 2234 } 2235 2236 memset(&cmd, 0x0, sizeof(cmd)); 2237 admin_queue = &ena_dev->admin_queue; 2238 2239 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2240 cmd.aq_common_descriptor.flags = 0; 2241 cmd.feat_common.feature_id = ENA_ADMIN_MTU; 2242 cmd.u.mtu.mtu = mtu; 2243 2244 ret = ena_com_execute_admin_command(admin_queue, 2245 (struct ena_admin_aq_entry *)&cmd, 2246 sizeof(cmd), 2247 (struct ena_admin_acq_entry *)&resp, 2248 sizeof(resp)); 2249 2250 if (unlikely(ret)) 2251 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret); 2252 2253 return ret; 2254 } 2255 2256 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, 2257 struct ena_admin_feature_offload_desc *offload) 2258 { 2259 int ret; 2260 struct ena_admin_get_feat_resp resp; 2261 2262 ret = ena_com_get_feature(ena_dev, &resp, 2263 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); 2264 if (unlikely(ret)) { 2265 pr_err("Failed to get offload capabilities %d\n", ret); 2266 return ret; 2267 } 2268 2269 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); 2270 2271 return 0; 2272 } 2273 2274 int ena_com_set_hash_function(struct ena_com_dev *ena_dev) 2275 { 2276 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2277 struct ena_rss *rss = &ena_dev->rss; 2278 struct ena_admin_set_feat_cmd cmd; 2279 struct ena_admin_set_feat_resp resp; 2280 struct ena_admin_get_feat_resp get_resp; 2281 int ret; 2282 2283 if (!ena_com_check_supported_feature_id(ena_dev, 2284 ENA_ADMIN_RSS_HASH_FUNCTION)) { 2285 pr_debug("Feature %d isn't supported\n", 2286 ENA_ADMIN_RSS_HASH_FUNCTION); 2287 return -EOPNOTSUPP; 2288 } 2289 2290 /* Validate hash function is supported */ 2291 ret = ena_com_get_feature(ena_dev, &get_resp, 2292 ENA_ADMIN_RSS_HASH_FUNCTION, 0); 2293 if (unlikely(ret)) 2294 return ret; 2295 2296 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { 2297 pr_err("Func hash %d isn't supported by device, abort\n", 2298 rss->hash_func); 2299 return -EOPNOTSUPP; 2300 } 2301 2302 memset(&cmd, 0x0, sizeof(cmd)); 2303 2304 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2305 cmd.aq_common_descriptor.flags = 2306 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2307 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; 2308 cmd.u.flow_hash_func.init_val = rss->hash_init_val; 2309 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; 2310 2311 ret = ena_com_mem_addr_set(ena_dev, 2312 &cmd.control_buffer.address, 2313 rss->hash_key_dma_addr); 2314 if (unlikely(ret)) { 2315 pr_err("memory address set failed\n"); 2316 return ret; 2317 
} 2318 2319 cmd.control_buffer.length = sizeof(*rss->hash_key); 2320 2321 ret = ena_com_execute_admin_command(admin_queue, 2322 (struct ena_admin_aq_entry *)&cmd, 2323 sizeof(cmd), 2324 (struct ena_admin_acq_entry *)&resp, 2325 sizeof(resp)); 2326 if (unlikely(ret)) { 2327 pr_err("Failed to set hash function %d. error: %d\n", 2328 rss->hash_func, ret); 2329 return -EINVAL; 2330 } 2331 2332 return 0; 2333 } 2334 2335 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, 2336 enum ena_admin_hash_functions func, 2337 const u8 *key, u16 key_len, u32 init_val) 2338 { 2339 struct ena_rss *rss = &ena_dev->rss; 2340 struct ena_admin_get_feat_resp get_resp; 2341 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2342 rss->hash_key; 2343 int rc; 2344 2345 /* Make sure size is a mult of DWs */ 2346 if (unlikely(key_len & 0x3)) 2347 return -EINVAL; 2348 2349 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2350 ENA_ADMIN_RSS_HASH_FUNCTION, 2351 rss->hash_key_dma_addr, 2352 sizeof(*rss->hash_key), 0); 2353 if (unlikely(rc)) 2354 return rc; 2355 2356 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { 2357 pr_err("Flow hash function %d isn't supported\n", func); 2358 return -EOPNOTSUPP; 2359 } 2360 2361 switch (func) { 2362 case ENA_ADMIN_TOEPLITZ: 2363 if (key_len > sizeof(hash_key->key)) { 2364 pr_err("key len (%hu) is bigger than the max supported (%zu)\n", 2365 key_len, sizeof(hash_key->key)); 2366 return -EINVAL; 2367 } 2368 2369 memcpy(hash_key->key, key, key_len); 2370 rss->hash_init_val = init_val; 2371 hash_key->keys_num = key_len >> 2; 2372 break; 2373 case ENA_ADMIN_CRC32: 2374 rss->hash_init_val = init_val; 2375 break; 2376 default: 2377 pr_err("Invalid hash function (%d)\n", func); 2378 return -EINVAL; 2379 } 2380 2381 rss->hash_func = func; 2382 rc = ena_com_set_hash_function(ena_dev); 2383 2384 /* Restore the old function */ 2385 if (unlikely(rc)) 2386 ena_com_get_hash_function(ena_dev, NULL, NULL); 2387 2388 return rc; 2389 } 2390 2391 int ena_com_get_hash_function(struct ena_com_dev *ena_dev, 2392 enum ena_admin_hash_functions *func, 2393 u8 *key) 2394 { 2395 struct ena_rss *rss = &ena_dev->rss; 2396 struct ena_admin_get_feat_resp get_resp; 2397 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2398 rss->hash_key; 2399 int rc; 2400 2401 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2402 ENA_ADMIN_RSS_HASH_FUNCTION, 2403 rss->hash_key_dma_addr, 2404 sizeof(*rss->hash_key), 0); 2405 if (unlikely(rc)) 2406 return rc; 2407 2408 rss->hash_func = get_resp.u.flow_hash_func.selected_func; 2409 if (func) 2410 *func = rss->hash_func; 2411 2412 if (key) 2413 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); 2414 2415 return 0; 2416 } 2417 2418 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, 2419 enum ena_admin_flow_hash_proto proto, 2420 u16 *fields) 2421 { 2422 struct ena_rss *rss = &ena_dev->rss; 2423 struct ena_admin_get_feat_resp get_resp; 2424 int rc; 2425 2426 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2427 ENA_ADMIN_RSS_HASH_INPUT, 2428 rss->hash_ctrl_dma_addr, 2429 sizeof(*rss->hash_ctrl), 0); 2430 if (unlikely(rc)) 2431 return rc; 2432 2433 if (fields) 2434 *fields = rss->hash_ctrl->selected_fields[proto].fields; 2435 2436 return 0; 2437 } 2438 2439 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) 2440 { 2441 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2442 struct ena_rss *rss = &ena_dev->rss; 2443 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; 2444 struct 
ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. 
proto %x supported %x selected %x\n", 2531 i, hash_ctrl->supported_fields[i].fields, 2532 hash_ctrl->selected_fields[i].fields); 2533 return -EOPNOTSUPP; 2534 } 2535 } 2536 2537 rc = ena_com_set_hash_ctrl(ena_dev); 2538 2539 /* In case of failure, restore the old hash ctrl */ 2540 if (unlikely(rc)) 2541 ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2542 2543 return rc; 2544 } 2545 2546 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, 2547 enum ena_admin_flow_hash_proto proto, 2548 u16 hash_fields) 2549 { 2550 struct ena_rss *rss = &ena_dev->rss; 2551 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; 2552 u16 supported_fields; 2553 int rc; 2554 2555 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { 2556 pr_err("Invalid proto num (%u)\n", proto); 2557 return -EINVAL; 2558 } 2559 2560 /* Get the ctrl table */ 2561 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); 2562 if (unlikely(rc)) 2563 return rc; 2564 2565 /* Make sure all the fields are supported */ 2566 supported_fields = hash_ctrl->supported_fields[proto].fields; 2567 if ((hash_fields & supported_fields) != hash_fields) { 2568 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n", 2569 proto, hash_fields, supported_fields); 2570 } 2571 2572 hash_ctrl->selected_fields[proto].fields = hash_fields; 2573 2574 rc = ena_com_set_hash_ctrl(ena_dev); 2575 2576 /* In case of failure, restore the old hash ctrl */ 2577 if (unlikely(rc)) 2578 ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2579 2580 return 0; 2581 } 2582 2583 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, 2584 u16 entry_idx, u16 entry_value) 2585 { 2586 struct ena_rss *rss = &ena_dev->rss; 2587 2588 if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) 2589 return -EINVAL; 2590 2591 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES))) 2592 return -EINVAL; 2593 2594 rss->host_rss_ind_tbl[entry_idx] = entry_value; 2595 2596 return 0; 2597 } 2598 2599 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) 2600 { 2601 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2602 struct ena_rss *rss = &ena_dev->rss; 2603 struct ena_admin_set_feat_cmd cmd; 2604 struct ena_admin_set_feat_resp resp; 2605 int ret; 2606 2607 if (!ena_com_check_supported_feature_id( 2608 ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { 2609 pr_debug("Feature %d isn't supported\n", 2610 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); 2611 return -EOPNOTSUPP; 2612 } 2613 2614 ret = ena_com_ind_tbl_convert_to_device(ena_dev); 2615 if (ret) { 2616 pr_err("Failed to convert host indirection table to device table\n"); 2617 return ret; 2618 } 2619 2620 memset(&cmd, 0x0, sizeof(cmd)); 2621 2622 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2623 cmd.aq_common_descriptor.flags = 2624 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2625 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG; 2626 cmd.u.ind_table.size = rss->tbl_log_size; 2627 cmd.u.ind_table.inline_index = 0xFFFFFFFF; 2628 2629 ret = ena_com_mem_addr_set(ena_dev, 2630 &cmd.control_buffer.address, 2631 rss->rss_ind_tbl_dma_addr); 2632 if (unlikely(ret)) { 2633 pr_err("memory address set failed\n"); 2634 return ret; 2635 } 2636 2637 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * 2638 sizeof(struct ena_admin_rss_ind_table_entry); 2639 2640 ret = ena_com_execute_admin_command(admin_queue, 2641 (struct ena_admin_aq_entry *)&cmd, 2642 sizeof(cmd), 2643 (struct ena_admin_acq_entry *)&resp, 2644 sizeof(resp)); 2645 2646 if (unlikely(ret)) 2647 pr_err("Failed to set 
indirect table. error: %d\n", ret); 2648 2649 return ret; 2650 } 2651 2652 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) 2653 { 2654 struct ena_rss *rss = &ena_dev->rss; 2655 struct ena_admin_get_feat_resp get_resp; 2656 u32 tbl_size; 2657 int i, rc; 2658 2659 tbl_size = (1ULL << rss->tbl_log_size) * 2660 sizeof(struct ena_admin_rss_ind_table_entry); 2661 2662 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2663 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 2664 rss->rss_ind_tbl_dma_addr, 2665 tbl_size, 0); 2666 if (unlikely(rc)) 2667 return rc; 2668 2669 if (!ind_tbl) 2670 return 0; 2671 2672 rc = ena_com_ind_tbl_convert_from_device(ena_dev); 2673 if (unlikely(rc)) 2674 return rc; 2675 2676 for (i = 0; i < (1 << rss->tbl_log_size); i++) 2677 ind_tbl[i] = rss->host_rss_ind_tbl[i]; 2678 2679 return 0; 2680 } 2681 2682 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) 2683 { 2684 int rc; 2685 2686 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2687 2688 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); 2689 if (unlikely(rc)) 2690 goto err_indr_tbl; 2691 2692 rc = ena_com_hash_key_allocate(ena_dev); 2693 if (unlikely(rc)) 2694 goto err_hash_key; 2695 2696 rc = ena_com_hash_ctrl_init(ena_dev); 2697 if (unlikely(rc)) 2698 goto err_hash_ctrl; 2699 2700 return 0; 2701 2702 err_hash_ctrl: 2703 ena_com_hash_key_destroy(ena_dev); 2704 err_hash_key: 2705 ena_com_indirect_table_destroy(ena_dev); 2706 err_indr_tbl: 2707 2708 return rc; 2709 } 2710 2711 void ena_com_rss_destroy(struct ena_com_dev *ena_dev) 2712 { 2713 ena_com_indirect_table_destroy(ena_dev); 2714 ena_com_hash_key_destroy(ena_dev); 2715 ena_com_hash_ctrl_destroy(ena_dev); 2716 2717 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2718 } 2719 2720 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) 2721 { 2722 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2723 2724 host_attr->host_info = 2725 dma_alloc_coherent(ena_dev->dmadev, SZ_4K, 2726 &host_attr->host_info_dma_addr, GFP_KERNEL); 2727 if (unlikely(!host_attr->host_info)) 2728 return -ENOMEM; 2729 2730 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << 2731 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | 2732 (ENA_COMMON_SPEC_VERSION_MINOR)); 2733 2734 return 0; 2735 } 2736 2737 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, 2738 u32 debug_area_size) 2739 { 2740 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2741 2742 host_attr->debug_area_virt_addr = 2743 dma_alloc_coherent(ena_dev->dmadev, debug_area_size, 2744 &host_attr->debug_area_dma_addr, 2745 GFP_KERNEL); 2746 if (unlikely(!host_attr->debug_area_virt_addr)) { 2747 host_attr->debug_area_size = 0; 2748 return -ENOMEM; 2749 } 2750 2751 host_attr->debug_area_size = debug_area_size; 2752 2753 return 0; 2754 } 2755 2756 void ena_com_delete_host_info(struct ena_com_dev *ena_dev) 2757 { 2758 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2759 2760 if (host_attr->host_info) { 2761 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info, 2762 host_attr->host_info_dma_addr); 2763 host_attr->host_info = NULL; 2764 } 2765 } 2766 2767 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) 2768 { 2769 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2770 2771 if (host_attr->debug_area_virt_addr) { 2772 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size, 2773 host_attr->debug_area_virt_addr, 2774 host_attr->debug_area_dma_addr); 2775 host_attr->debug_area_virt_addr = 
NULL; 2776 } 2777 } 2778 2779 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) 2780 { 2781 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2782 struct ena_com_admin_queue *admin_queue; 2783 struct ena_admin_set_feat_cmd cmd; 2784 struct ena_admin_set_feat_resp resp; 2785 2786 int ret; 2787 2788 /* Host attribute config is called before ena_com_get_dev_attr_feat 2789 * so ena_com can't check if the feature is supported. 2790 */ 2791 2792 memset(&cmd, 0x0, sizeof(cmd)); 2793 admin_queue = &ena_dev->admin_queue; 2794 2795 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2796 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; 2797 2798 ret = ena_com_mem_addr_set(ena_dev, 2799 &cmd.u.host_attr.debug_ba, 2800 host_attr->debug_area_dma_addr); 2801 if (unlikely(ret)) { 2802 pr_err("memory address set failed\n"); 2803 return ret; 2804 } 2805 2806 ret = ena_com_mem_addr_set(ena_dev, 2807 &cmd.u.host_attr.os_info_ba, 2808 host_attr->host_info_dma_addr); 2809 if (unlikely(ret)) { 2810 pr_err("memory address set failed\n"); 2811 return ret; 2812 } 2813 2814 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; 2815 2816 ret = ena_com_execute_admin_command(admin_queue, 2817 (struct ena_admin_aq_entry *)&cmd, 2818 sizeof(cmd), 2819 (struct ena_admin_acq_entry *)&resp, 2820 sizeof(resp)); 2821 2822 if (unlikely(ret)) 2823 pr_err("Failed to set host attributes: %d\n", ret); 2824 2825 return ret; 2826 } 2827 2828 /* Interrupt moderation */ 2829 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) 2830 { 2831 return ena_com_check_supported_feature_id(ena_dev, 2832 ENA_ADMIN_INTERRUPT_MODERATION); 2833 } 2834 2835 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, 2836 u32 tx_coalesce_usecs) 2837 { 2838 if (!ena_dev->intr_delay_resolution) { 2839 pr_err("Illegal interrupt delay granularity value\n"); 2840 return -EFAULT; 2841 } 2842 2843 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs / 2844 ena_dev->intr_delay_resolution; 2845 2846 return 0; 2847 } 2848 2849 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, 2850 u32 rx_coalesce_usecs) 2851 { 2852 if (!ena_dev->intr_delay_resolution) { 2853 pr_err("Illegal interrupt delay granularity value\n"); 2854 return -EFAULT; 2855 } 2856 2857 /* We use LOWEST entry of moderation table for storing 2858 * nonadaptive interrupt coalescing values 2859 */ 2860 ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = 2861 rx_coalesce_usecs / ena_dev->intr_delay_resolution; 2862 2863 return 0; 2864 } 2865 2866 void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev) 2867 { 2868 if (ena_dev->intr_moder_tbl) 2869 devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl); 2870 ena_dev->intr_moder_tbl = NULL; 2871 } 2872 2873 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) 2874 { 2875 struct ena_admin_get_feat_resp get_resp; 2876 u16 delay_resolution; 2877 int rc; 2878 2879 rc = ena_com_get_feature(ena_dev, &get_resp, 2880 ENA_ADMIN_INTERRUPT_MODERATION, 0); 2881 2882 if (rc) { 2883 if (rc == -EOPNOTSUPP) { 2884 pr_debug("Feature %d isn't supported\n", 2885 ENA_ADMIN_INTERRUPT_MODERATION); 2886 rc = 0; 2887 } else { 2888 pr_err("Failed to get interrupt moderation admin cmd. 
rc: %d\n", 2889 rc); 2890 } 2891 2892 /* no moderation supported, disable adaptive support */ 2893 ena_com_disable_adaptive_moderation(ena_dev); 2894 return rc; 2895 } 2896 2897 rc = ena_com_init_interrupt_moderation_table(ena_dev); 2898 if (rc) 2899 goto err; 2900 2901 /* if moderation is supported by device we set adaptive moderation */ 2902 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; 2903 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); 2904 2905 /* Disable adaptive moderation by default - can be enabled from 2906 * ethtool 2907 */ 2908 ena_com_disable_adaptive_moderation(ena_dev); 2909 2910 return 0; 2911 err: 2912 ena_com_destroy_interrupt_moderation(ena_dev); 2913 return rc; 2914 } 2915 2916 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) 2917 { 2918 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2919 2920 if (!intr_moder_tbl) 2921 return; 2922 2923 intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = 2924 ENA_INTR_LOWEST_USECS; 2925 intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval = 2926 ENA_INTR_LOWEST_PKTS; 2927 intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval = 2928 ENA_INTR_LOWEST_BYTES; 2929 2930 intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval = 2931 ENA_INTR_LOW_USECS; 2932 intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval = 2933 ENA_INTR_LOW_PKTS; 2934 intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval = 2935 ENA_INTR_LOW_BYTES; 2936 2937 intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval = 2938 ENA_INTR_MID_USECS; 2939 intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval = 2940 ENA_INTR_MID_PKTS; 2941 intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval = 2942 ENA_INTR_MID_BYTES; 2943 2944 intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval = 2945 ENA_INTR_HIGH_USECS; 2946 intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval = 2947 ENA_INTR_HIGH_PKTS; 2948 intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval = 2949 ENA_INTR_HIGH_BYTES; 2950 2951 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval = 2952 ENA_INTR_HIGHEST_USECS; 2953 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval = 2954 ENA_INTR_HIGHEST_PKTS; 2955 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval = 2956 ENA_INTR_HIGHEST_BYTES; 2957 } 2958 2959 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) 2960 { 2961 return ena_dev->intr_moder_tx_interval; 2962 } 2963 2964 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) 2965 { 2966 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2967 2968 if (intr_moder_tbl) 2969 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval; 2970 2971 return 0; 2972 } 2973 2974 void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, 2975 enum ena_intr_moder_level level, 2976 struct ena_intr_moder_entry *entry) 2977 { 2978 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; 2979 2980 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) 2981 return; 2982 2983 intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval; 2984 if (ena_dev->intr_delay_resolution) 2985 intr_moder_tbl[level].intr_moder_interval /= 2986 ena_dev->intr_delay_resolution; 2987 intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval; 2988 2989 /* use hardcoded value until ethtool supports bytecount parameter */ 2990 if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED) 2991 
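		/* only overwrite the stored byte count when the caller supplied one */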
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		pr_err("the size of the LLQ entry is smaller than needed\n");
		return -EINVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}
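
/* Illustrative bring-up sketch (not part of the driver code): the ena_com
 * primitives above are expected to be wired together by the calling driver
 * (e.g. the ENA netdev driver) roughly in the order below. The variable
 * names and error labels are placeholders for illustration only; the
 * authoritative call order lives in the caller.
 *
 *	struct ena_com_dev_get_features_ctx get_feat_ctx;
 *	struct ena_aenq_handlers aenq_handlers = { ... };
 *	struct ena_com_dev *ena_dev = ...;
 *	int rc;
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
 *	if (rc)
 *		goto err_mmio_read;
 *
 *	rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
 *	if (rc)
 *		goto err_admin_init;
 *
 *	...
 *
 * err_admin_init:
 *	ena_com_admin_destroy(ena_dev);
 * err_mmio_read:
 *	ena_com_mmio_reg_read_request_destroy(ena_dev);
 *	return rc;
 */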