Lines Matching +full:supports +full:- +full:cqe

1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
11 /* Timeout in micro-sec */
51 /* Abort - canceled by the driver */
75 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { in ena_com_mem_addr_set()
76 netdev_err(ena_dev->net_device, in ena_com_mem_addr_set()
77 "DMA address has more bits that the device supports\n"); in ena_com_mem_addr_set()
78 return -EINVAL; in ena_com_mem_addr_set()
81 ena_addr->mem_addr_low = lower_32_bits(addr); in ena_com_mem_addr_set()
82 ena_addr->mem_addr_high = (u16)upper_32_bits(addr); in ena_com_mem_addr_set()
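The fragment above checks that a DMA address fits within the device's advertised address width and then splits it into a low 32-bit / high 16-bit pair. A minimal sketch of the same pattern, with placeholder names rather than the driver's ena_common_mem_addr type:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_mem_addr {
	u32 mem_addr_low;
	u16 mem_addr_high;
};

/* Reject addresses that need more bits than the device supports, then split. */
static int example_mem_addr_set(struct example_mem_addr *out, dma_addr_t addr,
				u8 dma_addr_bits)
{
	if ((addr & GENMASK_ULL(dma_addr_bits - 1, 0)) != addr)
		return -EINVAL;

	out->mem_addr_low = lower_32_bits(addr);
	out->mem_addr_high = (u16)upper_32_bits(addr);
	return 0;
}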
89 struct ena_com_dev *ena_dev = admin_queue->ena_dev; in ena_com_admin_init_sq()
90 struct ena_com_admin_sq *sq = &admin_queue->sq; in ena_com_admin_init_sq()
91 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_sq()
93 sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL); in ena_com_admin_init_sq()
95 if (!sq->entries) { in ena_com_admin_init_sq()
96 netdev_err(ena_dev->net_device, "Memory allocation failed\n"); in ena_com_admin_init_sq()
97 return -ENOMEM; in ena_com_admin_init_sq()
100 sq->head = 0; in ena_com_admin_init_sq()
101 sq->tail = 0; in ena_com_admin_init_sq()
102 sq->phase = 1; in ena_com_admin_init_sq()
104 sq->db_addr = NULL; in ena_com_admin_init_sq()
111 struct ena_com_dev *ena_dev = admin_queue->ena_dev; in ena_com_admin_init_cq()
112 struct ena_com_admin_cq *cq = &admin_queue->cq; in ena_com_admin_init_cq()
113 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_cq()
115 cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL); in ena_com_admin_init_cq()
117 if (!cq->entries) { in ena_com_admin_init_cq()
118 netdev_err(ena_dev->net_device, "Memory allocation failed\n"); in ena_com_admin_init_cq()
119 return -ENOMEM; in ena_com_admin_init_cq()
122 cq->head = 0; in ena_com_admin_init_cq()
123 cq->phase = 1; in ena_com_admin_init_cq()
131 struct ena_com_aenq *aenq = &ena_dev->aenq; in ena_com_admin_init_aenq()
135 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; in ena_com_admin_init_aenq()
137 aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL); in ena_com_admin_init_aenq()
139 if (!aenq->entries) { in ena_com_admin_init_aenq()
140 netdev_err(ena_dev->net_device, "Memory allocation failed\n"); in ena_com_admin_init_aenq()
141 return -ENOMEM; in ena_com_admin_init_aenq()
144 aenq->head = aenq->q_depth; in ena_com_admin_init_aenq()
145 aenq->phase = 1; in ena_com_admin_init_aenq()
147 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); in ena_com_admin_init_aenq()
148 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); in ena_com_admin_init_aenq()
150 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); in ena_com_admin_init_aenq()
151 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); in ena_com_admin_init_aenq()
154 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; in ena_com_admin_init_aenq()
158 writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); in ena_com_admin_init_aenq()
161 netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n"); in ena_com_admin_init_aenq()
162 return -EINVAL; in ena_com_admin_init_aenq()
165 aenq->aenq_handlers = aenq_handlers; in ena_com_admin_init_aenq()
173 comp_ctx->occupied = false; in comp_ctxt_release()
174 atomic_dec(&queue->outstanding_cmds); in comp_ctxt_release()
180 if (unlikely(command_id >= admin_queue->q_depth)) { in get_comp_ctxt()
181 netdev_err(admin_queue->ena_dev->net_device, in get_comp_ctxt()
183 command_id, admin_queue->q_depth); in get_comp_ctxt()
187 if (unlikely(!admin_queue->comp_ctx)) { in get_comp_ctxt()
188 netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n"); in get_comp_ctxt()
192 if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) { in get_comp_ctxt()
193 netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n"); in get_comp_ctxt()
198 atomic_inc(&admin_queue->outstanding_cmds); in get_comp_ctxt()
199 admin_queue->comp_ctx[command_id].occupied = true; in get_comp_ctxt()
202 return &admin_queue->comp_ctx[command_id]; in get_comp_ctxt()
216 queue_size_mask = admin_queue->q_depth - 1; in __ena_com_submit_admin_cmd()
218 tail_masked = admin_queue->sq.tail & queue_size_mask; in __ena_com_submit_admin_cmd()
221 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); in __ena_com_submit_admin_cmd()
222 if (cnt >= admin_queue->q_depth) { in __ena_com_submit_admin_cmd()
223 netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n"); in __ena_com_submit_admin_cmd()
224 admin_queue->stats.out_of_space++; in __ena_com_submit_admin_cmd()
225 return ERR_PTR(-ENOSPC); in __ena_com_submit_admin_cmd()
228 cmd_id = admin_queue->curr_cmd_id; in __ena_com_submit_admin_cmd()
230 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & in __ena_com_submit_admin_cmd()
233 cmd->aq_common_descriptor.command_id |= cmd_id & in __ena_com_submit_admin_cmd()
238 return ERR_PTR(-EINVAL); in __ena_com_submit_admin_cmd()
240 comp_ctx->status = ENA_CMD_SUBMITTED; in __ena_com_submit_admin_cmd()
241 comp_ctx->comp_size = (u32)comp_size_in_bytes; in __ena_com_submit_admin_cmd()
242 comp_ctx->user_cqe = comp; in __ena_com_submit_admin_cmd()
243 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; in __ena_com_submit_admin_cmd()
245 reinit_completion(&comp_ctx->wait_event); in __ena_com_submit_admin_cmd()
247 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); in __ena_com_submit_admin_cmd()
249 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & in __ena_com_submit_admin_cmd()
252 admin_queue->sq.tail++; in __ena_com_submit_admin_cmd()
253 admin_queue->stats.submitted_cmd++; in __ena_com_submit_admin_cmd()
255 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) in __ena_com_submit_admin_cmd()
256 admin_queue->sq.phase = !admin_queue->sq.phase; in __ena_com_submit_admin_cmd()
258 writel(admin_queue->sq.tail, admin_queue->sq.db_addr); in __ena_com_submit_admin_cmd()
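The submission path above is a classic phase-bit producer: mask the tail into a power-of-two ring, stamp the current phase into the descriptor, copy it in, advance the tail, flip the phase on wrap, and ring the doorbell. A standalone sketch of that shape (types and names are illustrative, not the driver's):

#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_sq_entry {
	u8 flags;		/* bit 0 carries the phase */
	u8 payload[63];
};

struct example_sq {
	struct example_sq_entry *entries;
	u32 __iomem *db_addr;	/* doorbell register */
	u16 depth;		/* must be a power of two */
	u16 tail;
	u8 phase;
};

static void example_sq_submit(struct example_sq *sq, struct example_sq_entry *desc)
{
	u16 mask = sq->depth - 1;
	u16 tail_masked = sq->tail & mask;

	/* The consumer recognises new entries by this bit, not by an index. */
	desc->flags = (desc->flags & ~1u) | (sq->phase & 1u);

	memcpy(&sq->entries[tail_masked], desc, sizeof(*desc));

	sq->tail++;
	if ((sq->tail & mask) == 0)	/* wrapped: later entries use the other phase */
		sq->phase = !sq->phase;

	writel(sq->tail, sq->db_addr);	/* tell the device how far it may read */
}

The real admin path additionally reserves a completion context per command id and reinitialises its wait event before the doorbell write, as the lines above show.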
265 struct ena_com_dev *ena_dev = admin_queue->ena_dev; in ena_com_init_comp_ctxt()
266 size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx); in ena_com_init_comp_ctxt()
270 admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL); in ena_com_init_comp_ctxt()
271 if (unlikely(!admin_queue->comp_ctx)) { in ena_com_init_comp_ctxt()
272 netdev_err(ena_dev->net_device, "Memory allocation failed\n"); in ena_com_init_comp_ctxt()
273 return -ENOMEM; in ena_com_init_comp_ctxt()
276 for (i = 0; i < admin_queue->q_depth; i++) { in ena_com_init_comp_ctxt()
279 init_completion(&comp_ctx->wait_event); in ena_com_init_comp_ctxt()
294 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_submit_admin_cmd()
295 if (unlikely(!admin_queue->running_state)) { in ena_com_submit_admin_cmd()
296 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_submit_admin_cmd()
297 return ERR_PTR(-ENODEV); in ena_com_submit_admin_cmd()
304 admin_queue->running_state = false; in ena_com_submit_admin_cmd()
305 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_submit_admin_cmd()
316 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); in ena_com_init_io_sq()
318 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; in ena_com_init_io_sq()
319 io_sq->desc_entry_size = in ena_com_init_io_sq()
320 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? in ena_com_init_io_sq()
324 size = io_sq->desc_entry_size * io_sq->q_depth; in ena_com_init_io_sq()
326 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { in ena_com_init_io_sq()
327 io_sq->desc_addr.virt_addr = in ena_com_init_io_sq()
328 dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr, in ena_com_init_io_sq()
330 if (!io_sq->desc_addr.virt_addr) { in ena_com_init_io_sq()
331 io_sq->desc_addr.virt_addr = in ena_com_init_io_sq()
332 dma_alloc_coherent(ena_dev->dmadev, size, in ena_com_init_io_sq()
333 &io_sq->desc_addr.phys_addr, GFP_KERNEL); in ena_com_init_io_sq()
336 if (!io_sq->desc_addr.virt_addr) { in ena_com_init_io_sq()
337 netdev_err(ena_dev->net_device, "Memory allocation failed\n"); in ena_com_init_io_sq()
338 return -ENOMEM; in ena_com_init_io_sq()
342 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_com_init_io_sq()
344 io_sq->bounce_buf_ctrl.buffer_size = in ena_com_init_io_sq()
345 ena_dev->llq_info.desc_list_entry_size; in ena_com_init_io_sq()
346 io_sq->bounce_buf_ctrl.buffers_num = in ena_com_init_io_sq()
348 io_sq->bounce_buf_ctrl.next_to_use = 0; in ena_com_init_io_sq()
350 size = (size_t)io_sq->bounce_buf_ctrl.buffer_size * in ena_com_init_io_sq()
351 io_sq->bounce_buf_ctrl.buffers_num; in ena_com_init_io_sq()
353 io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); in ena_com_init_io_sq()
354 if (!io_sq->bounce_buf_ctrl.base_buffer) in ena_com_init_io_sq()
355 io_sq->bounce_buf_ctrl.base_buffer = in ena_com_init_io_sq()
356 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); in ena_com_init_io_sq()
358 if (!io_sq->bounce_buf_ctrl.base_buffer) { in ena_com_init_io_sq()
359 netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n"); in ena_com_init_io_sq()
360 return -ENOMEM; in ena_com_init_io_sq()
363 memcpy(&io_sq->llq_info, &ena_dev->llq_info, in ena_com_init_io_sq()
364 sizeof(io_sq->llq_info)); in ena_com_init_io_sq()
367 io_sq->llq_buf_ctrl.curr_bounce_buf = in ena_com_init_io_sq()
368 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); in ena_com_init_io_sq()
369 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, in ena_com_init_io_sq()
370 0x0, io_sq->llq_info.desc_list_entry_size); in ena_com_init_io_sq()
371 io_sq->llq_buf_ctrl.descs_left_in_line = in ena_com_init_io_sq()
372 io_sq->llq_info.descs_num_before_header; in ena_com_init_io_sq()
373 io_sq->disable_meta_caching = in ena_com_init_io_sq()
374 io_sq->llq_info.disable_meta_caching; in ena_com_init_io_sq()
376 if (io_sq->llq_info.max_entries_in_tx_burst > 0) in ena_com_init_io_sq()
377 io_sq->entries_in_tx_burst_left = in ena_com_init_io_sq()
378 io_sq->llq_info.max_entries_in_tx_burst; in ena_com_init_io_sq()
381 io_sq->tail = 0; in ena_com_init_io_sq()
382 io_sq->next_to_comp = 0; in ena_com_init_io_sq()
383 io_sq->phase = 1; in ena_com_init_io_sq()
394 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); in ena_com_init_io_cq()
397 io_cq->cdesc_entry_size_in_bytes = in ena_com_init_io_cq()
398 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? in ena_com_init_io_cq()
402 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; in ena_com_init_io_cq()
404 io_cq->cdesc_addr.virt_addr = in ena_com_init_io_cq()
405 dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); in ena_com_init_io_cq()
406 if (!io_cq->cdesc_addr.virt_addr) { in ena_com_init_io_cq()
407 io_cq->cdesc_addr.virt_addr = in ena_com_init_io_cq()
408 dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, in ena_com_init_io_cq()
412 if (!io_cq->cdesc_addr.virt_addr) { in ena_com_init_io_cq()
413 netdev_err(ena_dev->net_device, "Memory allocation failed\n"); in ena_com_init_io_cq()
414 return -ENOMEM; in ena_com_init_io_cq()
417 io_cq->phase = 1; in ena_com_init_io_cq()
418 io_cq->head = 0; in ena_com_init_io_cq()
424 struct ena_admin_acq_entry *cqe) in ena_com_handle_single_admin_completion() argument
429 cmd_id = cqe->acq_common_descriptor.command & in ena_com_handle_single_admin_completion()
434 netdev_err(admin_queue->ena_dev->net_device, in ena_com_handle_single_admin_completion()
436 admin_queue->running_state = false; in ena_com_handle_single_admin_completion()
440 comp_ctx->status = ENA_CMD_COMPLETED; in ena_com_handle_single_admin_completion()
441 comp_ctx->comp_status = cqe->acq_common_descriptor.status; in ena_com_handle_single_admin_completion()
443 if (comp_ctx->user_cqe) in ena_com_handle_single_admin_completion()
444 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); in ena_com_handle_single_admin_completion()
446 if (!admin_queue->polling) in ena_com_handle_single_admin_completion()
447 complete(&comp_ctx->wait_event); in ena_com_handle_single_admin_completion()
452 struct ena_admin_acq_entry *cqe = NULL; in ena_com_handle_admin_completion() local
457 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); in ena_com_handle_admin_completion()
458 phase = admin_queue->cq.phase; in ena_com_handle_admin_completion()
460 cqe = &admin_queue->cq.entries[head_masked]; in ena_com_handle_admin_completion()
463 while ((READ_ONCE(cqe->acq_common_descriptor.flags) & in ena_com_handle_admin_completion()
469 ena_com_handle_single_admin_completion(admin_queue, cqe); in ena_com_handle_admin_completion()
473 if (unlikely(head_masked == admin_queue->q_depth)) { in ena_com_handle_admin_completion()
478 cqe = &admin_queue->cq.entries[head_masked]; in ena_com_handle_admin_completion()
481 admin_queue->cq.head += comp_num; in ena_com_handle_admin_completion()
482 admin_queue->cq.phase = phase; in ena_com_handle_admin_completion()
483 admin_queue->sq.head += comp_num; in ena_com_handle_admin_completion()
484 admin_queue->stats.completed_cmd += comp_num; in ena_com_handle_admin_completion()
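The completion handler above is the matching phase-bit consumer: it walks the CQ while the entry's phase bit equals the expected phase, flips that expectation when the masked head wraps, and only then advances the shared head/phase and the SQ head by the number of completions. A sketch of that consumer loop (placeholder types; only the shape comes from the code above):

#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct example_cq_entry {
	u8 flags;		/* bit 0 carries the phase */
	u8 payload[63];
};

struct example_cq {
	struct example_cq_entry *entries;
	u16 depth;		/* power of two */
	u16 head;
	u8 phase;
};

static u16 example_cq_poll(struct example_cq *cq,
			   void (*handle)(struct example_cq_entry *entry))
{
	u16 head_masked = cq->head & (cq->depth - 1);
	u8 phase = cq->phase;
	u16 comp_num = 0;

	while ((READ_ONCE(cq->entries[head_masked].flags) & 1u) == phase) {
		/* Read the rest of the entry only after the phase bit is seen. */
		dma_rmb();
		handle(&cq->entries[head_masked]);

		head_masked++;
		comp_num++;
		if (head_masked == cq->depth) {
			head_masked = 0;
			phase = !phase;
		}
	}

	cq->head += comp_num;
	cq->phase = phase;
	return comp_num;
}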
491 netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n", in ena_com_comp_status_to_errno()
498 return -ENOMEM; in ena_com_comp_status_to_errno()
500 return -EOPNOTSUPP; in ena_com_comp_status_to_errno()
505 return -EINVAL; in ena_com_comp_status_to_errno()
507 return -EAGAIN; in ena_com_comp_status_to_errno()
510 return -EINVAL; in ena_com_comp_status_to_errno()
529 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); in ena_com_wait_and_process_admin_cq_polling()
532 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
534 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
536 if (comp_ctx->status != ENA_CMD_SUBMITTED) in ena_com_wait_and_process_admin_cq_polling()
540 netdev_err(admin_queue->ena_dev->net_device, in ena_com_wait_and_process_admin_cq_polling()
543 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
544 admin_queue->stats.no_completion++; in ena_com_wait_and_process_admin_cq_polling()
545 admin_queue->running_state = false; in ena_com_wait_and_process_admin_cq_polling()
546 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
548 ret = -ETIME; in ena_com_wait_and_process_admin_cq_polling()
553 admin_queue->ena_dev->ena_min_poll_delay_us); in ena_com_wait_and_process_admin_cq_polling()
556 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { in ena_com_wait_and_process_admin_cq_polling()
557 netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n"); in ena_com_wait_and_process_admin_cq_polling()
558 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
559 admin_queue->stats.aborted_cmd++; in ena_com_wait_and_process_admin_cq_polling()
560 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_polling()
561 ret = -ENODEV; in ena_com_wait_and_process_admin_cq_polling()
565 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status); in ena_com_wait_and_process_admin_cq_polling()
567 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); in ena_com_wait_and_process_admin_cq_polling()
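The polling wait above combines a jiffies deadline with exponential backoff between polls. A minimal sketch of that loop shape; the condition callback and the backoff helper are placeholders standing in for the driver's completion check and ena_delay_exponential_backoff_us():

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* Placeholder for the driver's ena_delay_exponential_backoff_us(). */
static void example_backoff_us(u32 exp, u32 min_delay_us)
{
	u32 delay_us = max(min_delay_us, 100U);			/* floor chosen for the example */

	delay_us = min(delay_us << min(exp, 10U), 5000U);	/* cap chosen for the example */
	usleep_range(delay_us, 2 * delay_us);
}

static int example_poll(bool (*condition_met)(void *arg), void *arg,
			u32 timeout_us, u32 min_delay_us)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us);
	u32 exp = 0;

	while (!condition_met(arg)) {
		if (time_is_before_jiffies(timeout))
			return -ETIME;	/* deadline passed without the condition */

		example_backoff_us(exp++, min_delay_us);
	}

	return 0;
}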
584 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; in ena_com_set_llq()
588 admin_queue = &ena_dev->admin_queue; in ena_com_set_llq()
593 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; in ena_com_set_llq()
594 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; in ena_com_set_llq()
595 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; in ena_com_set_llq()
596 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; in ena_com_set_llq()
609 netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret); in ena_com_set_llq()
618 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; in ena_com_config_llq_info()
625 supported_feat = llq_features->header_location_ctrl_supported; in ena_com_config_llq_info()
627 if (likely(supported_feat & llq_default_cfg->llq_header_location)) { in ena_com_config_llq_info()
628 llq_info->header_location_ctrl = in ena_com_config_llq_info()
629 llq_default_cfg->llq_header_location; in ena_com_config_llq_info()
631 netdev_err(ena_dev->net_device, in ena_com_config_llq_info()
633 return -EINVAL; in ena_com_config_llq_info()
636 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { in ena_com_config_llq_info()
637 supported_feat = llq_features->descriptors_stride_ctrl_supported; in ena_com_config_llq_info()
638 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { in ena_com_config_llq_info()
639 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; in ena_com_config_llq_info()
642 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; in ena_com_config_llq_info()
644 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; in ena_com_config_llq_info()
646 netdev_err(ena_dev->net_device, in ena_com_config_llq_info()
649 return -EINVAL; in ena_com_config_llq_info()
652 netdev_err(ena_dev->net_device, in ena_com_config_llq_info()
654 llq_default_cfg->llq_stride_ctrl, supported_feat, in ena_com_config_llq_info()
655 llq_info->desc_stride_ctrl); in ena_com_config_llq_info()
658 llq_info->desc_stride_ctrl = 0; in ena_com_config_llq_info()
661 supported_feat = llq_features->entry_size_ctrl_supported; in ena_com_config_llq_info()
662 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { in ena_com_config_llq_info()
663 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; in ena_com_config_llq_info()
664 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; in ena_com_config_llq_info()
667 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; in ena_com_config_llq_info()
668 llq_info->desc_list_entry_size = 128; in ena_com_config_llq_info()
670 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; in ena_com_config_llq_info()
671 llq_info->desc_list_entry_size = 192; in ena_com_config_llq_info()
673 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; in ena_com_config_llq_info()
674 llq_info->desc_list_entry_size = 256; in ena_com_config_llq_info()
676 netdev_err(ena_dev->net_device, in ena_com_config_llq_info()
678 return -EINVAL; in ena_com_config_llq_info()
681 netdev_err(ena_dev->net_device, in ena_com_config_llq_info()
683 llq_default_cfg->llq_ring_entry_size, supported_feat, in ena_com_config_llq_info()
684 llq_info->desc_list_entry_size); in ena_com_config_llq_info()
686 if (unlikely(llq_info->desc_list_entry_size & 0x7)) { in ena_com_config_llq_info()
690 netdev_err(ena_dev->net_device, "Illegal entry size %d\n", in ena_com_config_llq_info()
691 llq_info->desc_list_entry_size); in ena_com_config_llq_info()
692 return -EINVAL; in ena_com_config_llq_info()
695 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) in ena_com_config_llq_info()
696 llq_info->descs_per_entry = llq_info->desc_list_entry_size / in ena_com_config_llq_info()
699 llq_info->descs_per_entry = 1; in ena_com_config_llq_info()
701 supported_feat = llq_features->desc_num_before_header_supported; in ena_com_config_llq_info()
702 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { in ena_com_config_llq_info()
703 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; in ena_com_config_llq_info()
706 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; in ena_com_config_llq_info()
708 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; in ena_com_config_llq_info()
710 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; in ena_com_config_llq_info()
712 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; in ena_com_config_llq_info()
714 netdev_err(ena_dev->net_device, in ena_com_config_llq_info()
717 return -EINVAL; in ena_com_config_llq_info()
720 netdev_err(ena_dev->net_device, in ena_com_config_llq_info()
722 llq_default_cfg->llq_num_decs_before_header, supported_feat, in ena_com_config_llq_info()
723 llq_info->descs_num_before_header); in ena_com_config_llq_info()
726 llq_accel_mode_get = llq_features->accel_mode.u.get; in ena_com_config_llq_info()
728 llq_info->disable_meta_caching = in ena_com_config_llq_info()
733 llq_info->max_entries_in_tx_burst = in ena_com_config_llq_info()
735 llq_default_cfg->llq_ring_entry_size_value; in ena_com_config_llq_info()
739 netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc); in ena_com_config_llq_info()
750 wait_for_completion_timeout(&comp_ctx->wait_event, in ena_com_wait_and_process_admin_cq_interrupts()
751 usecs_to_jiffies(admin_queue->completion_timeout)); in ena_com_wait_and_process_admin_cq_interrupts()
756 * 2) There is a completion but the driver didn't get any msi-x interrupt. in ena_com_wait_and_process_admin_cq_interrupts()
758 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { in ena_com_wait_and_process_admin_cq_interrupts()
759 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_interrupts()
761 admin_queue->stats.no_completion++; in ena_com_wait_and_process_admin_cq_interrupts()
762 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_and_process_admin_cq_interrupts()
764 if (comp_ctx->status == ENA_CMD_COMPLETED) { in ena_com_wait_and_process_admin_cq_interrupts()
765 netdev_err(admin_queue->ena_dev->net_device, in ena_com_wait_and_process_admin_cq_interrupts()
766 … "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d)\n", in ena_com_wait_and_process_admin_cq_interrupts()
767 comp_ctx->cmd_opcode); in ena_com_wait_and_process_admin_cq_interrupts()
769 netdev_err(admin_queue->ena_dev->net_device, in ena_com_wait_and_process_admin_cq_interrupts()
771 comp_ctx->cmd_opcode, comp_ctx->status); in ena_com_wait_and_process_admin_cq_interrupts()
773 admin_queue->running_state = false; in ena_com_wait_and_process_admin_cq_interrupts()
774 ret = -ETIME; in ena_com_wait_and_process_admin_cq_interrupts()
778 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); in ena_com_wait_and_process_admin_cq_interrupts()
790 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_reg_bar_read32()
792 mmio_read->read_resp; in ena_com_reg_bar_read32()
795 u32 timeout = mmio_read->reg_read_to; in ena_com_reg_bar_read32()
803 if (!mmio_read->readless_supported) in ena_com_reg_bar_read32()
804 return readl(ena_dev->reg_bar + offset); in ena_com_reg_bar_read32()
806 spin_lock_irqsave(&mmio_read->lock, flags); in ena_com_reg_bar_read32()
807 mmio_read->seq_num++; in ena_com_reg_bar_read32()
809 read_resp->req_id = mmio_read->seq_num + 0xDEAD; in ena_com_reg_bar_read32()
812 mmio_read_reg |= mmio_read->seq_num & in ena_com_reg_bar_read32()
815 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); in ena_com_reg_bar_read32()
818 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) in ena_com_reg_bar_read32()
825 netdev_err(ena_dev->net_device, in ena_com_reg_bar_read32()
827 mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off); in ena_com_reg_bar_read32()
832 if (read_resp->reg_off != offset) { in ena_com_reg_bar_read32()
833 netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n"); in ena_com_reg_bar_read32()
836 ret = read_resp->reg_val; in ena_com_reg_bar_read32()
839 spin_unlock_irqrestore(&mmio_read->lock, flags); in ena_com_reg_bar_read32()
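The readless register read above avoids MMIO reads entirely: under the spinlock the driver bumps a sequence number, poisons the DMA-coherent response slot, writes a read request to the device, and then polls the slot until the device echoes the expected sequence number or a timeout hits. A sketch of that handshake; the request-register layout and failure sentinel here are illustrative, not the exact ENA_REGS_MMIO_* encoding:

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

struct example_read_resp {
	u16 req_id;	/* echoed sequence number */
	u16 reg_off;	/* offset the device actually read */
	u32 reg_val;
};

struct example_mmio_read {
	struct example_read_resp *resp;	/* DMA-coherent, written by the device */
	u32 __iomem *req_reg;		/* request register in the device BAR */
	u32 timeout_us;
	u16 seq_num;
};

static u32 example_readless_read(struct example_mmio_read *m, u16 offset)
{
	u32 i;

	m->seq_num++;
	/* Poison req_id so a stale response can't be mistaken for this request. */
	m->resp->req_id = m->seq_num + 0xDEAD;

	writel(((u32)offset << 16) | m->seq_num, m->req_reg);

	for (i = 0; i < m->timeout_us; i++) {
		if (READ_ONCE(m->resp->req_id) == m->seq_num)
			break;
		udelay(1);
	}

	if (i == m->timeout_us || m->resp->reg_off != offset)
		return 0xFFFFFFFF;	/* failure sentinel, like the driver's timeout path */

	return m->resp->reg_val;
}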
845 * Polling mode - wait until the completion is available.
846 * Async mode - wait on wait queue until the completion is ready
854 if (admin_queue->polling) in ena_com_wait_and_process_admin_cq()
865 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_destroy_io_sq()
873 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) in ena_com_destroy_io_sq()
882 destroy_cmd.sq.sq_idx = io_sq->idx; in ena_com_destroy_io_sq()
891 if (unlikely(ret && (ret != -ENODEV))) in ena_com_destroy_io_sq()
892 netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret); in ena_com_destroy_io_sq()
903 if (io_cq->cdesc_addr.virt_addr) { in ena_com_io_queue_free()
904 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; in ena_com_io_queue_free()
906 dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr, in ena_com_io_queue_free()
907 io_cq->cdesc_addr.phys_addr); in ena_com_io_queue_free()
909 io_cq->cdesc_addr.virt_addr = NULL; in ena_com_io_queue_free()
912 if (io_sq->desc_addr.virt_addr) { in ena_com_io_queue_free()
913 size = io_sq->desc_entry_size * io_sq->q_depth; in ena_com_io_queue_free()
915 dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr, in ena_com_io_queue_free()
916 io_sq->desc_addr.phys_addr); in ena_com_io_queue_free()
918 io_sq->desc_addr.virt_addr = NULL; in ena_com_io_queue_free()
921 if (io_sq->bounce_buf_ctrl.base_buffer) { in ena_com_io_queue_free()
922 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer); in ena_com_io_queue_free()
923 io_sq->bounce_buf_ctrl.base_buffer = NULL; in ena_com_io_queue_free()
940 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); in wait_for_reset_state()
941 return -ETIME; in wait_for_reset_state()
949 return -ETIME; in wait_for_reset_state()
951 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); in wait_for_reset_state()
962 !(ena_dev->supported_features & feature_mask)) in ena_com_check_supported_feature_id()
980 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id); in ena_com_get_feature_ex()
981 return -EOPNOTSUPP; in ena_com_get_feature_ex()
985 admin_queue = &ena_dev->admin_queue; in ena_com_get_feature_ex()
999 netdev_err(ena_dev->net_device, "Memory address set failed\n"); in ena_com_get_feature_ex()
1016 netdev_err(ena_dev->net_device, in ena_com_get_feature_ex()
1037 return ena_dev->rss.hash_func; in ena_com_get_current_hash_function()
1043 (ena_dev->rss).hash_key; in ena_com_hash_key_fill_default_key()
1045 netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key)); in ena_com_hash_key_fill_default_key()
1049 hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS; in ena_com_hash_key_fill_default_key()
1054 struct ena_rss *rss = &ena_dev->rss; in ena_com_hash_key_allocate()
1057 return -EOPNOTSUPP; in ena_com_hash_key_allocate()
1059 rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), in ena_com_hash_key_allocate()
1060 &rss->hash_key_dma_addr, GFP_KERNEL); in ena_com_hash_key_allocate()
1062 if (unlikely(!rss->hash_key)) in ena_com_hash_key_allocate()
1063 return -ENOMEM; in ena_com_hash_key_allocate()
1070 struct ena_rss *rss = &ena_dev->rss; in ena_com_hash_key_destroy()
1072 if (rss->hash_key) in ena_com_hash_key_destroy()
1073 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key, in ena_com_hash_key_destroy()
1074 rss->hash_key_dma_addr); in ena_com_hash_key_destroy()
1075 rss->hash_key = NULL; in ena_com_hash_key_destroy()
1080 struct ena_rss *rss = &ena_dev->rss; in ena_com_hash_ctrl_init()
1082 rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), in ena_com_hash_ctrl_init()
1083 &rss->hash_ctrl_dma_addr, GFP_KERNEL); in ena_com_hash_ctrl_init()
1085 if (unlikely(!rss->hash_ctrl)) in ena_com_hash_ctrl_init()
1086 return -ENOMEM; in ena_com_hash_ctrl_init()
1093 struct ena_rss *rss = &ena_dev->rss; in ena_com_hash_ctrl_destroy()
1095 if (rss->hash_ctrl) in ena_com_hash_ctrl_destroy()
1096 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl, in ena_com_hash_ctrl_destroy()
1097 rss->hash_ctrl_dma_addr); in ena_com_hash_ctrl_destroy()
1098 rss->hash_ctrl = NULL; in ena_com_hash_ctrl_destroy()
1104 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_allocate()
1116 netdev_err(ena_dev->net_device, in ena_com_indirect_table_allocate()
1120 return -EINVAL; in ena_com_indirect_table_allocate()
1126 rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr, in ena_com_indirect_table_allocate()
1128 if (unlikely(!rss->rss_ind_tbl)) in ena_com_indirect_table_allocate()
1132 rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); in ena_com_indirect_table_allocate()
1133 if (unlikely(!rss->host_rss_ind_tbl)) in ena_com_indirect_table_allocate()
1136 rss->tbl_log_size = log_size; in ena_com_indirect_table_allocate()
1144 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr); in ena_com_indirect_table_allocate()
1145 rss->rss_ind_tbl = NULL; in ena_com_indirect_table_allocate()
1147 rss->tbl_log_size = 0; in ena_com_indirect_table_allocate()
1148 return -ENOMEM; in ena_com_indirect_table_allocate()
1153 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_destroy()
1154 size_t tbl_size = (1ULL << rss->tbl_log_size) * in ena_com_indirect_table_destroy()
1157 if (rss->rss_ind_tbl) in ena_com_indirect_table_destroy()
1158 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, in ena_com_indirect_table_destroy()
1159 rss->rss_ind_tbl_dma_addr); in ena_com_indirect_table_destroy()
1160 rss->rss_ind_tbl = NULL; in ena_com_indirect_table_destroy()
1162 if (rss->host_rss_ind_tbl) in ena_com_indirect_table_destroy()
1163 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl); in ena_com_indirect_table_destroy()
1164 rss->host_rss_ind_tbl = NULL; in ena_com_indirect_table_destroy()
1170 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_create_io_sq()
1180 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) in ena_com_create_io_sq()
1189 create_cmd.sq_caps_2 |= io_sq->mem_queue_type & in ena_com_create_io_sq()
1200 create_cmd.sq_depth = io_sq->q_depth; in ena_com_create_io_sq()
1202 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { in ena_com_create_io_sq()
1205 io_sq->desc_addr.phys_addr); in ena_com_create_io_sq()
1207 netdev_err(ena_dev->net_device, "Memory address set failed\n"); in ena_com_create_io_sq()
1218 netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret); in ena_com_create_io_sq()
1222 io_sq->idx = cmd_completion.sq_idx; in ena_com_create_io_sq()
1224 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + in ena_com_create_io_sq()
1227 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_com_create_io_sq()
1228 io_sq->desc_addr.pbuf_dev_addr = in ena_com_create_io_sq()
1229 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + in ena_com_create_io_sq()
1233 netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); in ena_com_create_io_sq()
1240 struct ena_rss *rss = &ena_dev->rss; in ena_com_ind_tbl_convert_to_device()
1245 for (i = 0; i < 1 << rss->tbl_log_size; i++) { in ena_com_ind_tbl_convert_to_device()
1246 qid = rss->host_rss_ind_tbl[i]; in ena_com_ind_tbl_convert_to_device()
1248 return -EINVAL; in ena_com_ind_tbl_convert_to_device()
1250 io_sq = &ena_dev->io_sq_queues[qid]; in ena_com_ind_tbl_convert_to_device()
1252 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) in ena_com_ind_tbl_convert_to_device()
1253 return -EINVAL; in ena_com_ind_tbl_convert_to_device()
1255 rss->rss_ind_tbl[i].cq_idx = io_sq->idx; in ena_com_ind_tbl_convert_to_device()
1264 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; in ena_com_update_intr_delay_resolution()
1267 netdev_err(ena_dev->net_device, in ena_com_update_intr_delay_resolution()
1273 ena_dev->intr_moder_rx_interval = in ena_com_update_intr_delay_resolution()
1274 ena_dev->intr_moder_rx_interval * in ena_com_update_intr_delay_resolution()
1279 ena_dev->intr_moder_tx_interval = in ena_com_update_intr_delay_resolution()
1280 ena_dev->intr_moder_tx_interval * in ena_com_update_intr_delay_resolution()
1284 ena_dev->intr_delay_resolution = intr_delay_resolution; in ena_com_update_intr_delay_resolution()
1304 if (ret == -ENODEV) in ena_com_execute_admin_command()
1305 netdev_dbg(admin_queue->ena_dev->net_device, in ena_com_execute_admin_command()
1308 netdev_err(admin_queue->ena_dev->net_device, in ena_com_execute_admin_command()
1316 if (admin_queue->running_state) in ena_com_execute_admin_command()
1317 netdev_err(admin_queue->ena_dev->net_device, in ena_com_execute_admin_command()
1320 netdev_dbg(admin_queue->ena_dev->net_device, in ena_com_execute_admin_command()
1329 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_create_io_cq()
1338 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & in ena_com_create_io_cq()
1343 create_cmd.msix_vector = io_cq->msix_vector; in ena_com_create_io_cq()
1344 create_cmd.cq_depth = io_cq->q_depth; in ena_com_create_io_cq()
1348 io_cq->cdesc_addr.phys_addr); in ena_com_create_io_cq()
1350 netdev_err(ena_dev->net_device, "Memory address set failed\n"); in ena_com_create_io_cq()
1360 netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret); in ena_com_create_io_cq()
1364 io_cq->idx = cmd_completion.cq_idx; in ena_com_create_io_cq()
1366 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + in ena_com_create_io_cq()
1370 io_cq->numa_node_cfg_reg = in ena_com_create_io_cq()
1371 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + in ena_com_create_io_cq()
1374 netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); in ena_com_create_io_cq()
1384 netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid, in ena_com_get_io_handlers()
1386 return -EINVAL; in ena_com_get_io_handlers()
1389 *io_sq = &ena_dev->io_sq_queues[qid]; in ena_com_get_io_handlers()
1390 *io_cq = &ena_dev->io_cq_queues[qid]; in ena_com_get_io_handlers()
1397 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_abort_admin_commands()
1401 if (!admin_queue->comp_ctx) in ena_com_abort_admin_commands()
1404 for (i = 0; i < admin_queue->q_depth; i++) { in ena_com_abort_admin_commands()
1409 comp_ctx->status = ENA_CMD_ABORTED; in ena_com_abort_admin_commands()
1411 complete(&comp_ctx->wait_event); in ena_com_abort_admin_commands()
1417 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_wait_for_abort_completion()
1421 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_for_abort_completion()
1422 while (atomic_read(&admin_queue->outstanding_cmds) != 0) { in ena_com_wait_for_abort_completion()
1423 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_for_abort_completion()
1424 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); in ena_com_wait_for_abort_completion()
1425 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_wait_for_abort_completion()
1427 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_wait_for_abort_completion()
1433 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_destroy_io_cq()
1440 destroy_cmd.cq_idx = io_cq->idx; in ena_com_destroy_io_cq()
1449 if (unlikely(ret && (ret != -ENODEV))) in ena_com_destroy_io_cq()
1450 netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret); in ena_com_destroy_io_cq()
1457 return ena_dev->admin_queue.running_state; in ena_com_get_admin_running_state()
1462 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_set_admin_running_state()
1465 spin_lock_irqsave(&admin_queue->q_lock, flags); in ena_com_set_admin_running_state()
1466 ena_dev->admin_queue.running_state = state; in ena_com_set_admin_running_state()
1467 spin_unlock_irqrestore(&admin_queue->q_lock, flags); in ena_com_set_admin_running_state()
1472 u16 depth = ena_dev->aenq.q_depth; in ena_com_admin_aenq_enable()
1474 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); in ena_com_admin_aenq_enable()
1479 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); in ena_com_admin_aenq_enable()
1492 dev_info(ena_dev->dmadev, "Can't get aenq configuration\n"); in ena_com_set_aenq_config()
1497 netdev_warn(ena_dev->net_device, in ena_com_set_aenq_config()
1500 return -EOPNOTSUPP; in ena_com_set_aenq_config()
1504 admin_queue = &ena_dev->admin_queue; in ena_com_set_aenq_config()
1518 netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret); in ena_com_set_aenq_config()
1529 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); in ena_com_get_dma_width()
1530 return -ETIME; in ena_com_get_dma_width()
1536 netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width); in ena_com_get_dma_width()
1539 netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width); in ena_com_get_dma_width()
1540 return -EINVAL; in ena_com_get_dma_width()
1543 ena_dev->dma_addr_bits = width; in ena_com_get_dma_width()
1562 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); in ena_com_validate_version()
1563 return -ETIME; in ena_com_validate_version()
1566 dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n", in ena_com_validate_version()
1570 dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n", in ena_com_validate_version()
1586 netdev_err(ena_dev->net_device, in ena_com_validate_version()
1587 "ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); in ena_com_validate_version()
1588 return -1; in ena_com_validate_version()
1599 if (!admin_queue->comp_ctx) in ena_com_free_ena_admin_queue_comp_ctx()
1602 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx); in ena_com_free_ena_admin_queue_comp_ctx()
1604 admin_queue->comp_ctx = NULL; in ena_com_free_ena_admin_queue_comp_ctx()
1609 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_admin_destroy()
1610 struct ena_com_admin_cq *cq = &admin_queue->cq; in ena_com_admin_destroy()
1611 struct ena_com_admin_sq *sq = &admin_queue->sq; in ena_com_admin_destroy()
1612 struct ena_com_aenq *aenq = &ena_dev->aenq; in ena_com_admin_destroy()
1617 size = ADMIN_SQ_SIZE(admin_queue->q_depth); in ena_com_admin_destroy()
1618 if (sq->entries) in ena_com_admin_destroy()
1619 dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr); in ena_com_admin_destroy()
1620 sq->entries = NULL; in ena_com_admin_destroy()
1622 size = ADMIN_CQ_SIZE(admin_queue->q_depth); in ena_com_admin_destroy()
1623 if (cq->entries) in ena_com_admin_destroy()
1624 dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr); in ena_com_admin_destroy()
1625 cq->entries = NULL; in ena_com_admin_destroy()
1627 size = ADMIN_AENQ_SIZE(aenq->q_depth); in ena_com_admin_destroy()
1628 if (ena_dev->aenq.entries) in ena_com_admin_destroy()
1629 dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr); in ena_com_admin_destroy()
1630 aenq->entries = NULL; in ena_com_admin_destroy()
1640 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); in ena_com_set_admin_polling_mode()
1641 ena_dev->admin_queue.polling = polling; in ena_com_set_admin_polling_mode()
1646 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_mmio_reg_read_request_init()
1648 spin_lock_init(&mmio_read->lock); in ena_com_mmio_reg_read_request_init()
1649 mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), in ena_com_mmio_reg_read_request_init()
1650 &mmio_read->read_resp_dma_addr, GFP_KERNEL); in ena_com_mmio_reg_read_request_init()
1651 if (unlikely(!mmio_read->read_resp)) in ena_com_mmio_reg_read_request_init()
1656 mmio_read->read_resp->req_id = 0x0; in ena_com_mmio_reg_read_request_init()
1657 mmio_read->seq_num = 0x0; in ena_com_mmio_reg_read_request_init()
1658 mmio_read->readless_supported = true; in ena_com_mmio_reg_read_request_init()
1664 return -ENOMEM; in ena_com_mmio_reg_read_request_init()
1669 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_set_mmio_read_mode()
1671 mmio_read->readless_supported = readless_supported; in ena_com_set_mmio_read_mode()
1676 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_mmio_reg_read_request_destroy()
1678 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); in ena_com_mmio_reg_read_request_destroy()
1679 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); in ena_com_mmio_reg_read_request_destroy()
1681 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp, in ena_com_mmio_reg_read_request_destroy()
1682 mmio_read->read_resp_dma_addr); in ena_com_mmio_reg_read_request_destroy()
1684 mmio_read->read_resp = NULL; in ena_com_mmio_reg_read_request_destroy()
1689 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; in ena_com_mmio_reg_read_request_write_dev_addr()
1692 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); in ena_com_mmio_reg_read_request_write_dev_addr()
1693 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); in ena_com_mmio_reg_read_request_write_dev_addr()
1695 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); in ena_com_mmio_reg_read_request_write_dev_addr()
1696 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); in ena_com_mmio_reg_read_request_write_dev_addr()
1702 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_admin_init()
1709 netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); in ena_com_admin_init()
1710 return -ETIME; in ena_com_admin_init()
1714 netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n"); in ena_com_admin_init()
1715 return -ENODEV; in ena_com_admin_init()
1718 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; in ena_com_admin_init()
1720 admin_queue->q_dmadev = ena_dev->dmadev; in ena_com_admin_init()
1721 admin_queue->polling = false; in ena_com_admin_init()
1722 admin_queue->curr_cmd_id = 0; in ena_com_admin_init()
1724 atomic_set(&admin_queue->outstanding_cmds, 0); in ena_com_admin_init()
1726 spin_lock_init(&admin_queue->q_lock); in ena_com_admin_init()
1740 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + in ena_com_admin_init()
1743 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); in ena_com_admin_init()
1744 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); in ena_com_admin_init()
1746 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); in ena_com_admin_init()
1747 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); in ena_com_admin_init()
1749 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); in ena_com_admin_init()
1750 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); in ena_com_admin_init()
1752 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); in ena_com_admin_init()
1753 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); in ena_com_admin_init()
1756 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; in ena_com_admin_init()
1762 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; in ena_com_admin_init()
1767 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); in ena_com_admin_init()
1768 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); in ena_com_admin_init()
1773 admin_queue->ena_dev = ena_dev; in ena_com_admin_init()
1774 admin_queue->running_state = true; in ena_com_admin_init()
1790 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { in ena_com_create_io_queue()
1791 netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n", in ena_com_create_io_queue()
1792 ctx->qid, ENA_TOTAL_NUM_QUEUES); in ena_com_create_io_queue()
1793 return -EINVAL; in ena_com_create_io_queue()
1796 io_sq = &ena_dev->io_sq_queues[ctx->qid]; in ena_com_create_io_queue()
1797 io_cq = &ena_dev->io_cq_queues[ctx->qid]; in ena_com_create_io_queue()
1803 io_cq->q_depth = ctx->queue_size; in ena_com_create_io_queue()
1804 io_cq->direction = ctx->direction; in ena_com_create_io_queue()
1805 io_cq->qid = ctx->qid; in ena_com_create_io_queue()
1807 io_cq->msix_vector = ctx->msix_vector; in ena_com_create_io_queue()
1809 io_sq->q_depth = ctx->queue_size; in ena_com_create_io_queue()
1810 io_sq->direction = ctx->direction; in ena_com_create_io_queue()
1811 io_sq->qid = ctx->qid; in ena_com_create_io_queue()
1813 io_sq->mem_queue_type = ctx->mem_queue_type; in ena_com_create_io_queue()
1815 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) in ena_com_create_io_queue()
1817 io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256); in ena_com_create_io_queue()
1830 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); in ena_com_create_io_queue()
1849 netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n", in ena_com_destroy_io_queue()
1854 io_sq = &ena_dev->io_sq_queues[qid]; in ena_com_destroy_io_queue()
1855 io_cq = &ena_dev->io_cq_queues[qid]; in ena_com_destroy_io_queue()
1873 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; in ena_get_dev_stats()
1874 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; in ena_get_dev_stats()
1878 admin_queue = &ena_dev->admin_queue; in ena_get_dev_stats()
1880 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; in ena_get_dev_stats()
1881 get_cmd->aq_common_descriptor.flags = 0; in ena_get_dev_stats()
1882 get_cmd->type = type; in ena_get_dev_stats()
1891 netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret); in ena_get_dev_stats()
1902 customer_metrics = &ena_dev->customer_metrics; in ena_com_set_supported_customer_metrics()
1904 customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK; in ena_com_set_supported_customer_metrics()
1912 customer_metrics->supported_metrics = in ena_com_set_supported_customer_metrics()
1915 netdev_err(ena_dev->net_device, in ena_com_set_supported_customer_metrics()
1930 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, in ena_com_get_dev_attr_feat()
1933 ena_dev->supported_features = get_resp.u.dev_attr.supported_features; in ena_com_get_dev_attr_feat()
1934 ena_dev->capabilities = get_resp.u.dev_attr.capabilities; in ena_com_get_dev_attr_feat()
1936 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { in ena_com_get_dev_attr_feat()
1944 return -EINVAL; in ena_com_get_dev_attr_feat()
1946 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext, in ena_com_get_dev_attr_feat()
1948 ena_dev->tx_max_header_size = in ena_com_get_dev_attr_feat()
1953 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, in ena_com_get_dev_attr_feat()
1955 ena_dev->tx_max_header_size = in ena_com_get_dev_attr_feat()
1967 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, in ena_com_get_dev_attr_feat()
1975 memcpy(&get_feat_ctx->offload, &get_resp.u.offload, in ena_com_get_dev_attr_feat()
1984 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints)); in ena_com_get_dev_attr_feat()
1985 else if (rc == -EOPNOTSUPP) in ena_com_get_dev_attr_feat()
1986 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints)); in ena_com_get_dev_attr_feat()
1992 memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq)); in ena_com_get_dev_attr_feat()
1993 else if (rc == -EOPNOTSUPP) in ena_com_get_dev_attr_feat()
1994 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); in ena_com_get_dev_attr_feat()
2005 ena_com_handle_admin_completion(&ena_dev->admin_queue); in ena_com_admin_q_comp_intr_handler()
2014 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers; in ena_com_get_specific_aenq_cb()
2016 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) in ena_com_get_specific_aenq_cb()
2017 return aenq_handlers->handlers[group]; in ena_com_get_specific_aenq_cb()
2019 return aenq_handlers->unimplemented_handler; in ena_com_get_specific_aenq_cb()
2030 struct ena_com_aenq *aenq = &ena_dev->aenq; in ena_com_aenq_intr_handler()
2036 masked_head = aenq->head & (aenq->q_depth - 1); in ena_com_aenq_intr_handler()
2037 phase = aenq->phase; in ena_com_aenq_intr_handler()
2038 aenq_e = &aenq->entries[masked_head]; /* Get first entry */ in ena_com_aenq_intr_handler()
2039 aenq_common = &aenq_e->aenq_common_desc; in ena_com_aenq_intr_handler()
2042 while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { in ena_com_aenq_intr_handler()
2048 timestamp = (u64)aenq_common->timestamp_low | in ena_com_aenq_intr_handler()
2049 ((u64)aenq_common->timestamp_high << 32); in ena_com_aenq_intr_handler()
2051 netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n", in ena_com_aenq_intr_handler()
2052 aenq_common->group, aenq_common->syndrome, timestamp); in ena_com_aenq_intr_handler()
2056 aenq_common->group); in ena_com_aenq_intr_handler()
2063 if (unlikely(masked_head == aenq->q_depth)) { in ena_com_aenq_intr_handler()
2067 aenq_e = &aenq->entries[masked_head]; in ena_com_aenq_intr_handler()
2068 aenq_common = &aenq_e->aenq_common_desc; in ena_com_aenq_intr_handler()
2071 aenq->head += processed; in ena_com_aenq_intr_handler()
2072 aenq->phase = phase; in ena_com_aenq_intr_handler()
2080 writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); in ena_com_aenq_intr_handler()
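The AENQ loop above resolves a callback per event group before invoking it; ena_com_get_specific_aenq_cb() (shown just above) falls back to an unimplemented-handler stub for unknown groups. A sketch of that dispatch table, mirroring the shape of the driver's struct ena_aenq_handlers with placeholder names and sizes:

#include <linux/types.h>

#define EXAMPLE_MAX_HANDLERS 16	/* stands in for ENA_MAX_HANDLERS */

struct example_aenq_entry;	/* stands in for struct ena_admin_aenq_entry */

typedef void (*example_aenq_handler)(void *data, struct example_aenq_entry *entry);

struct example_aenq_handlers {
	example_aenq_handler handlers[EXAMPLE_MAX_HANDLERS];
	example_aenq_handler unimplemented_handler;
};

/* Resolve the callback for an event group, falling back for unknown groups. */
static example_aenq_handler
example_get_aenq_handler(struct example_aenq_handlers *h, u16 group)
{
	if (group < EXAMPLE_MAX_HANDLERS && h->handlers[group])
		return h->handlers[group];

	return h->unimplemented_handler;
}

After every entry is handled, the loop above advances aenq->head and writes it to the AENQ head doorbell with writel_relaxed(), letting the device reuse the consumed entries.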
2093 netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n"); in ena_com_dev_reset()
2094 return -ETIME; in ena_com_dev_reset()
2098 netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n"); in ena_com_dev_reset()
2099 return -EINVAL; in ena_com_dev_reset()
2105 netdev_err(ena_dev->net_device, "Invalid timeout value\n"); in ena_com_dev_reset()
2106 return -EINVAL; in ena_com_dev_reset()
2113 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); in ena_com_dev_reset()
2121 netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n"); in ena_com_dev_reset()
2126 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); in ena_com_dev_reset()
2129 netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n"); in ena_com_dev_reset()
2137 ena_dev->admin_queue.completion_timeout = timeout * 100000; in ena_com_dev_reset()
2139 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; in ena_com_dev_reset()
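Reading the timeout selection above: the value extracted from the capabilities register is multiplied by 100000 µs, which implies the field counts in units of 100 ms (a register value of 5 would give 5 * 100000 = 500000 µs, i.e. 500 ms); a zero field falls back to the compile-time ADMIN_CMD_TIMEOUT_US default.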
2151 netdev_err(ena_dev->net_device, "Capability %d isn't supported\n", in ena_com_get_eni_stats()
2153 return -EOPNOTSUPP; in ena_com_get_eni_stats()
2172 netdev_err(ena_dev->net_device, "Capability %d isn't supported\n", in ena_com_get_ena_srd_info()
2174 return -EOPNOTSUPP; in ena_com_get_ena_srd_info()
2192 if (unlikely(len > ena_dev->customer_metrics.buffer_len)) { in ena_com_get_customer_metrics()
2193 netdev_err(ena_dev->net_device, in ena_com_get_customer_metrics()
2195 return -EINVAL; in ena_com_get_customer_metrics()
2199 netdev_err(ena_dev->net_device, "Capability %d not supported.\n", in ena_com_get_customer_metrics()
2201 return -EOPNOTSUPP; in ena_com_get_customer_metrics()
2204 if (!ena_dev->customer_metrics.supported_metrics) { in ena_com_get_customer_metrics()
2205 netdev_err(ena_dev->net_device, "No supported customer metrics.\n"); in ena_com_get_customer_metrics()
2206 return -EOPNOTSUPP; in ena_com_get_customer_metrics()
2212 &get_cmd->u.control_buffer.address, in ena_com_get_customer_metrics()
2213 ena_dev->customer_metrics.buffer_dma_addr); in ena_com_get_customer_metrics()
2215 netdev_err(ena_dev->net_device, "Memory address set failed.\n"); in ena_com_get_customer_metrics()
2219 get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len; in ena_com_get_customer_metrics()
2220 get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics; in ena_com_get_customer_metrics()
2223 memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len); in ena_com_get_customer_metrics()
2225 netdev_err(ena_dev->net_device, "Failed to get customer metrics. error: %d\n", ret); in ena_com_get_customer_metrics()
2238 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU); in ena_com_set_dev_mtu()
2239 return -EOPNOTSUPP; in ena_com_set_dev_mtu()
2243 admin_queue = &ena_dev->admin_queue; in ena_com_set_dev_mtu()
2257 netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret); in ena_com_set_dev_mtu()
2264 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_set_hash_function()
2265 struct ena_rss *rss = &ena_dev->rss; in ena_com_set_hash_function()
2272 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", in ena_com_set_hash_function()
2274 return -EOPNOTSUPP; in ena_com_set_hash_function()
2283 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { in ena_com_set_hash_function()
2284 netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n", in ena_com_set_hash_function()
2285 rss->hash_func); in ena_com_set_hash_function()
2286 return -EOPNOTSUPP; in ena_com_set_hash_function()
2295 cmd.u.flow_hash_func.init_val = rss->hash_init_val; in ena_com_set_hash_function()
2296 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; in ena_com_set_hash_function()
2300 rss->hash_key_dma_addr); in ena_com_set_hash_function()
2302 netdev_err(ena_dev->net_device, "Memory address set failed\n"); in ena_com_set_hash_function()
2306 cmd.control_buffer.length = sizeof(*rss->hash_key); in ena_com_set_hash_function()
2314 netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n", in ena_com_set_hash_function()
2315 rss->hash_func, ret); in ena_com_set_hash_function()
2316 return -EINVAL; in ena_com_set_hash_function()
2329 struct ena_rss *rss = &ena_dev->rss; in ena_com_fill_hash_function()
2332 hash_key = rss->hash_key; in ena_com_fill_hash_function()
2336 return -EINVAL; in ena_com_fill_hash_function()
2340 rss->hash_key_dma_addr, in ena_com_fill_hash_function()
2341 sizeof(*rss->hash_key), 0); in ena_com_fill_hash_function()
2346 netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func); in ena_com_fill_hash_function()
2347 return -EOPNOTSUPP; in ena_com_fill_hash_function()
2351 if (key_len != sizeof(hash_key->key)) { in ena_com_fill_hash_function()
2352 netdev_err(ena_dev->net_device, in ena_com_fill_hash_function()
2354 sizeof(hash_key->key)); in ena_com_fill_hash_function()
2355 return -EINVAL; in ena_com_fill_hash_function()
2357 memcpy(hash_key->key, key, key_len); in ena_com_fill_hash_function()
2358 hash_key->key_parts = key_len / sizeof(hash_key->key[0]); in ena_com_fill_hash_function()
2361 rss->hash_init_val = init_val; in ena_com_fill_hash_function()
2362 old_func = rss->hash_func; in ena_com_fill_hash_function()
2363 rss->hash_func = func; in ena_com_fill_hash_function()
2368 rss->hash_func = old_func; in ena_com_fill_hash_function()
2376 struct ena_rss *rss = &ena_dev->rss; in ena_com_get_hash_function()
2381 return -EINVAL; in ena_com_get_hash_function()
2385 rss->hash_key_dma_addr, in ena_com_get_hash_function()
2386 sizeof(*rss->hash_key), 0); in ena_com_get_hash_function()
2391 rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func); in ena_com_get_hash_function()
2392 if (rss->hash_func) in ena_com_get_hash_function()
2393 rss->hash_func--; in ena_com_get_hash_function()
2395 *func = rss->hash_func; in ena_com_get_hash_function()
2403 ena_dev->rss.hash_key; in ena_com_get_hash_key()
2406 memcpy(key, hash_key->key, in ena_com_get_hash_key()
2407 (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0])); in ena_com_get_hash_key()
2416 struct ena_rss *rss = &ena_dev->rss; in ena_com_get_hash_ctrl()
2422 rss->hash_ctrl_dma_addr, in ena_com_get_hash_ctrl()
2423 sizeof(*rss->hash_ctrl), 0); in ena_com_get_hash_ctrl()
2428 *fields = rss->hash_ctrl->selected_fields[proto].fields; in ena_com_get_hash_ctrl()
2435 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_set_hash_ctrl()
2436 struct ena_rss *rss = &ena_dev->rss; in ena_com_set_hash_ctrl()
2437 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; in ena_com_set_hash_ctrl()
2443 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", in ena_com_set_hash_ctrl()
2445 return -EOPNOTSUPP; in ena_com_set_hash_ctrl()
2460 rss->hash_ctrl_dma_addr); in ena_com_set_hash_ctrl()
2462 netdev_err(ena_dev->net_device, "Memory address set failed\n"); in ena_com_set_hash_ctrl()
2473 netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret); in ena_com_set_hash_ctrl()
2480 struct ena_rss *rss = &ena_dev->rss; in ena_com_set_default_hash_ctrl()
2482 rss->hash_ctrl; in ena_com_set_default_hash_ctrl()
2491 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = in ena_com_set_default_hash_ctrl()
2495 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = in ena_com_set_default_hash_ctrl()
2499 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = in ena_com_set_default_hash_ctrl()
2503 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = in ena_com_set_default_hash_ctrl()
2507 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = in ena_com_set_default_hash_ctrl()
2510 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = in ena_com_set_default_hash_ctrl()
2513 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = in ena_com_set_default_hash_ctrl()
2516 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = in ena_com_set_default_hash_ctrl()
2520 available_fields = hash_ctrl->selected_fields[i].fields & in ena_com_set_default_hash_ctrl()
2521 hash_ctrl->supported_fields[i].fields; in ena_com_set_default_hash_ctrl()
2522 if (available_fields != hash_ctrl->selected_fields[i].fields) { in ena_com_set_default_hash_ctrl()
2523 netdev_err(ena_dev->net_device, in ena_com_set_default_hash_ctrl()
2525 i, hash_ctrl->supported_fields[i].fields, in ena_com_set_default_hash_ctrl()
2526 hash_ctrl->selected_fields[i].fields); in ena_com_set_default_hash_ctrl()
2527 return -EOPNOTSUPP; in ena_com_set_default_hash_ctrl()
2544 struct ena_rss *rss = &ena_dev->rss; in ena_com_fill_hash_ctrl()
2545 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; in ena_com_fill_hash_ctrl()
2550 netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto); in ena_com_fill_hash_ctrl()
2551 return -EINVAL; in ena_com_fill_hash_ctrl()
2560 supported_fields = hash_ctrl->supported_fields[proto].fields; in ena_com_fill_hash_ctrl()
2562 netdev_err(ena_dev->net_device, in ena_com_fill_hash_ctrl()
2563 "Proto %d doesn't support the required fields %x. supports only: %x\n", in ena_com_fill_hash_ctrl()
2567 hash_ctrl->selected_fields[proto].fields = hash_fields; in ena_com_fill_hash_ctrl()
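/*
 * Illustrative only: selecting which header fields feed the hash for one
 * protocol, e.g. hashing TCP/IPv4 flows on the full 4-tuple. The
 * ENA_ADMIN_RSS_L3_*/ENA_ADMIN_RSS_L4_* field-bit names are assumed from
 * ena_admin_defs.h; bits the device does not support are rejected with
 * -EOPNOTSUPP by the supported_fields check above.
 */
static int ena_example_hash_tcp4_on_4tuple(struct ena_com_dev *ena_dev)
{
	u16 fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		     ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP;

	return ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4, fields);
}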
2581 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_fill_entry()
2583 if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) in ena_com_indirect_table_fill_entry()
2584 return -EINVAL; in ena_com_indirect_table_fill_entry()
2587 return -EINVAL; in ena_com_indirect_table_fill_entry()
2589 rss->host_rss_ind_tbl[entry_idx] = entry_value; in ena_com_indirect_table_fill_entry()
2596 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; in ena_com_indirect_table_set()
2597 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_set()
2603 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", in ena_com_indirect_table_set()
2605 return -EOPNOTSUPP; in ena_com_indirect_table_set()
2610 netdev_err(ena_dev->net_device, in ena_com_indirect_table_set()
2621 cmd.u.ind_table.size = rss->tbl_log_size; in ena_com_indirect_table_set()
2626 rss->rss_ind_tbl_dma_addr); in ena_com_indirect_table_set()
2628 netdev_err(ena_dev->net_device, "Memory address set failed\n"); in ena_com_indirect_table_set()
2632 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * in ena_com_indirect_table_set()
2642 netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret); in ena_com_indirect_table_set()
2649 struct ena_rss *rss = &ena_dev->rss; in ena_com_indirect_table_get()
2654 tbl_size = (1ULL << rss->tbl_log_size) * in ena_com_indirect_table_get()
2659 rss->rss_ind_tbl_dma_addr, in ena_com_indirect_table_get()
2667 for (i = 0; i < (1 << rss->tbl_log_size); i++) in ena_com_indirect_table_get()
2668 ind_tbl[i] = rss->host_rss_ind_tbl[i]; in ena_com_indirect_table_get()
2677 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); in ena_com_rss_init()
2690 else if (rc != -EOPNOTSUPP) in ena_com_rss_init()
2714 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); in ena_com_rss_destroy()
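/*
 * Illustrative only: the usual RSS bring-up order under these assumptions --
 * ena_com_rss_init() sizes the tables from a log2 table size, the host copy of
 * the indirection table is filled round-robin across RX queues, and
 * ena_com_indirect_table_set() pushes it to the device. Signatures are assumed
 * from ena_com.h.
 */
static int ena_example_rss_bring_up(struct ena_com_dev *ena_dev,
				    u16 log_size, u16 num_rx_queues)
{
	u32 table_size = 1U << log_size;
	u32 i;
	int rc;

	rc = ena_com_rss_init(ena_dev, log_size);
	if (rc)
		return rc;

	for (i = 0; i < table_size; i++) {
		/* entry_idx is bounds-checked against 1 << tbl_log_size above */
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % num_rx_queues);
		if (rc)
			goto err;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (rc)
		goto err;

	return 0;
err:
	ena_com_rss_destroy(ena_dev);
	return rc;
}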
2719 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_allocate_host_info()
2721 host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K, in ena_com_allocate_host_info()
2722 &host_attr->host_info_dma_addr, GFP_KERNEL); in ena_com_allocate_host_info()
2723 if (unlikely(!host_attr->host_info)) in ena_com_allocate_host_info()
2724 return -ENOMEM; in ena_com_allocate_host_info()
2726 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << in ena_com_allocate_host_info()
2736 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_allocate_debug_area()
2738 host_attr->debug_area_virt_addr = in ena_com_allocate_debug_area()
2739 dma_alloc_coherent(ena_dev->dmadev, debug_area_size, in ena_com_allocate_debug_area()
2740 &host_attr->debug_area_dma_addr, GFP_KERNEL); in ena_com_allocate_debug_area()
2741 if (unlikely(!host_attr->debug_area_virt_addr)) { in ena_com_allocate_debug_area()
2742 host_attr->debug_area_size = 0; in ena_com_allocate_debug_area()
2743 return -ENOMEM; in ena_com_allocate_debug_area()
2746 host_attr->debug_area_size = debug_area_size; in ena_com_allocate_debug_area()
2753 struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics; in ena_com_allocate_customer_metrics_buffer()
2755 customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE; in ena_com_allocate_customer_metrics_buffer()
2756 customer_metrics->buffer_virt_addr = NULL; in ena_com_allocate_customer_metrics_buffer()
2758 customer_metrics->buffer_virt_addr = in ena_com_allocate_customer_metrics_buffer()
2759 dma_alloc_coherent(ena_dev->dmadev, customer_metrics->buffer_len, in ena_com_allocate_customer_metrics_buffer()
2760 &customer_metrics->buffer_dma_addr, GFP_KERNEL); in ena_com_allocate_customer_metrics_buffer()
2761 if (!customer_metrics->buffer_virt_addr) { in ena_com_allocate_customer_metrics_buffer()
2762 customer_metrics->buffer_len = 0; in ena_com_allocate_customer_metrics_buffer()
2763 return -ENOMEM; in ena_com_allocate_customer_metrics_buffer()
2771 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_delete_host_info()
2773 if (host_attr->host_info) { in ena_com_delete_host_info()
2774 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info, in ena_com_delete_host_info()
2775 host_attr->host_info_dma_addr); in ena_com_delete_host_info()
2776 host_attr->host_info = NULL; in ena_com_delete_host_info()
2782 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_delete_debug_area()
2784 if (host_attr->debug_area_virt_addr) { in ena_com_delete_debug_area()
2785 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size, in ena_com_delete_debug_area()
2786 host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr); in ena_com_delete_debug_area()
2787 host_attr->debug_area_virt_addr = NULL; in ena_com_delete_debug_area()
2793 struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics; in ena_com_delete_customer_metrics_buffer()
2795 if (customer_metrics->buffer_virt_addr) { in ena_com_delete_customer_metrics_buffer()
2796 dma_free_coherent(ena_dev->dmadev, customer_metrics->buffer_len, in ena_com_delete_customer_metrics_buffer()
2797 customer_metrics->buffer_virt_addr, in ena_com_delete_customer_metrics_buffer()
2798 customer_metrics->buffer_dma_addr); in ena_com_delete_customer_metrics_buffer()
2799 customer_metrics->buffer_virt_addr = NULL; in ena_com_delete_customer_metrics_buffer()
2800 customer_metrics->buffer_len = 0; in ena_com_delete_customer_metrics_buffer()
2806 struct ena_host_attribute *host_attr = &ena_dev->host_attr; in ena_com_set_host_attributes()
2818 admin_queue = &ena_dev->admin_queue; in ena_com_set_host_attributes()
2825 host_attr->debug_area_dma_addr); in ena_com_set_host_attributes()
2827 netdev_err(ena_dev->net_device, "Memory address set failed\n"); in ena_com_set_host_attributes()
2833 host_attr->host_info_dma_addr); in ena_com_set_host_attributes()
2835 netdev_err(ena_dev->net_device, "Memory address set failed\n"); in ena_com_set_host_attributes()
2839 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; in ena_com_set_host_attributes()
2848 netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret); in ena_com_set_host_attributes()
2866 netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n"); in ena_com_update_nonadaptive_moderation_interval()
2867 return -EFAULT; in ena_com_update_nonadaptive_moderation_interval()
2880 ena_dev->intr_delay_resolution, in ena_com_update_nonadaptive_moderation_interval_tx()
2881 &ena_dev->intr_moder_tx_interval); in ena_com_update_nonadaptive_moderation_interval_tx()
2889 ena_dev->intr_delay_resolution, in ena_com_update_nonadaptive_moderation_interval_rx()
2890 &ena_dev->intr_moder_rx_interval); in ena_com_update_nonadaptive_moderation_interval_rx()
2903 if (rc == -EOPNOTSUPP) { in ena_com_init_interrupt_moderation()
2904 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", in ena_com_init_interrupt_moderation()
2908 netdev_err(ena_dev->net_device, in ena_com_init_interrupt_moderation()
2921 /* Disable adaptive moderation by default - can be enabled later */ in ena_com_init_interrupt_moderation()
2929 return ena_dev->intr_moder_tx_interval; in ena_com_get_nonadaptive_moderation_interval_tx()
2934 return ena_dev->intr_moder_rx_interval; in ena_com_get_nonadaptive_moderation_interval_rx()
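/*
 * Illustrative only: how an ethtool .set_coalesce handler might map
 * microsecond values onto the device's moderation intervals. The static helper
 * above divides the requested microseconds by intr_delay_resolution and
 * rejects a zero resolution with -EFAULT.
 */
static int ena_example_set_coalesce(struct ena_com_dev *ena_dev,
				    u32 tx_usecs, u32 rx_usecs)
{
	int rc;

	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, tx_usecs);
	if (rc)
		return rc;

	return ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, rx_usecs);
}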
2941 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; in ena_com_config_dev_mode()
2944 if (!llq_features->max_llq_num) { in ena_com_config_dev_mode()
2945 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; in ena_com_config_dev_mode()
2953 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size - in ena_com_config_dev_mode()
2954 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); in ena_com_config_dev_mode()
2956 if (unlikely(ena_dev->tx_max_header_size == 0)) { in ena_com_config_dev_mode()
2957 netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n"); in ena_com_config_dev_mode()
2958 return -EINVAL; in ena_com_config_dev_mode()
2961 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; in ena_com_config_dev_mode()
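/*
 * Illustrative only: choosing between host-memory and LLQ (device-memory) TX
 * placement at probe time. The llq_features pointer comes from the device's
 * feature query; when the device exposes no LLQs, the function above silently
 * falls back to ENA_ADMIN_PLACEMENT_POLICY_HOST. The struct
 * ena_llq_configurations type of the third parameter is an assumption from
 * ena_com.h.
 */
static void ena_example_set_placement(struct ena_com_dev *ena_dev,
				      struct ena_admin_feature_llq_desc *llq_features,
				      struct ena_llq_configurations *llq_cfg)
{
	if (ena_com_config_dev_mode(ena_dev, llq_features, llq_cfg)) {
		/* Keep working with host-memory descriptor rings on failure */
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	}
}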