/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#ifdef ENA_EXTENDED_STATS

#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)

#endif /* ENA_EXTENDED_STATS */

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))
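
/* Worked example (editor's note, derived from the constants above): with
 * ENA_CTRL_MAJOR = 0, ENA_CTRL_MINOR = 0 and ENA_CTRL_SUB_MINOR = 1 the two
 * shifted terms are zero, so MIN_ENA_CTRL_VER evaluates to 1. Any controller
 * whose masked major/minor/sub-minor version is below that is rejected by
 * ena_com_validate_version() further down in this file.
 */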

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_MIN_ADMIN_POLL_US 100

#define ENA_MAX_ADMIN_POLL_US 5000

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err(ena_dev, "DMA address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}
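
/* Illustrative sketch (editor's note, not part of the driver): how the helper
 * above splits a DMA address. The address and the 44-bit width are made-up
 * values for illustration only:
 *
 *	dma_addr_t addr = 0x0000000fedcba987ULL;
 *
 *	// With ena_dev->dma_addr_bits == 44 the GENMASK_ULL() check passes;
 *	// mem_addr_low  becomes 0xedcba987 (lower_32_bits(addr)) and
 *	// mem_addr_high becomes 0x000f (upper_32_bits(addr), cast to u16).
 */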

static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);

	if (!sq->entries) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);

	if (!cq->entries) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
			aenq->entries,
			aenq->dma_addr,
			aenq->mem_handle);

	if (!aenq->entries) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= admin_queue->q_depth)) {
		ena_trc_err(admin_queue->ena_dev,
			    "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
			    command_id, admin_queue->q_depth);
		return NULL;
	}

	if (unlikely(!admin_queue->comp_ctx)) {
		ena_trc_err(admin_queue->ena_dev,
			    "Completion context is NULL\n");
		return NULL;
	}

	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err(admin_queue->ena_dev,
			    "Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&admin_queue->outstanding_cmds);
		admin_queue->comp_ctx[command_id].occupied = true;
	}

	return &admin_queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(ENA_COM_INVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

	return comp_ctx;
}
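
/* Editor's note on the phase logic above: sq.tail is a free-running counter,
 * so with the default ENA_ADMIN_QUEUE_DEPTH of 32 the producer writes entry
 * (tail & 31) and flips sq.phase each time the masked tail wraps to 0. The
 * device uses the phase bit in each descriptor to distinguish new entries
 * from stale ones: tails 0..31 carry phase 1, tails 32..63 carry phase 0,
 * and so on.
 */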

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
	if (unlikely(!admin_queue->comp_ctx)) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;
	io_sq->bus = ena_dev->bus;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
					    size,
					    io_sq->desc_addr.virt_addr,
					    io_sq->desc_addr.phys_addr,
					    io_sq->desc_addr.mem_handle,
					    ctx->numa_node,
					    dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
					       size,
					       io_sq->desc_addr.virt_addr,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}

		if (!io_sq->desc_addr.virt_addr) {
			ena_trc_err(ena_dev, "Memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
			io_sq->bounce_buf_ctrl.buffers_num;

		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->bounce_buf_ctrl.base_buffer,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
		io_sq->disable_meta_caching =
			io_sq->llq_info.disable_meta_caching;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
	io_cq->bus = ena_dev->bus;

	ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
					    size,
					    io_cq->cdesc_addr.virt_addr,
					    io_cq->cdesc_addr.phys_addr,
					    io_cq->cdesc_addr.mem_handle,
					    ctx->numa_node,
					    prev_node,
					    ENA_CDESC_RING_SIZE_ALIGNMENT);
	if (!io_cq->cdesc_addr.virt_addr) {
		ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
					       size,
					       io_cq->cdesc_addr.virt_addr,
					       io_cq->cdesc_addr.phys_addr,
					       io_cq->cdesc_addr.mem_handle,
					       ENA_CDESC_RING_SIZE_ALIGNMENT);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err(ena_dev, "Memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err(admin_queue->ena_dev,
			    "comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry until the
		 * phase bit has been validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
					u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err(admin_queue->ena_dev,
			    "Admin command failed[%u]\n", comp_status);

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return ENA_COM_OK;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_UNSUPPORTED;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	case ENA_ADMIN_RESOURCE_BUSY:
		return ENA_COM_TRY_AGAIN;
	}

	return ENA_COM_INVAL;
}

static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
	delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
	delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
	ENA_USLEEP(delay_us);
}
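
/* Worked example (editor's note): with a delay_us at or below the
 * ENA_MIN_ADMIN_POLL_US floor of 100 us, successive calls with
 * exp = 0, 1, 2, ... sleep 100, 200, 400, 800, 1600, 3200 and then a
 * constant 5000 us, since the product is clamped to ENA_MAX_ADMIN_POLL_US.
 */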

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	ena_time_t timeout;
	int ret;
	u32 exp = 0;

	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

	while (1) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (ENA_TIME_EXPIRE(timeout)) {
			ena_trc_err(admin_queue->ena_dev,
				    "Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ena_delay_exponential_backoff_us(exp++,
						 admin_queue->ena_dev->ena_min_poll_delay_us);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
		 admin_queue->ena_dev, "Invalid comp status %d\n",
		 comp_ctx->status);

	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/*
 * Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which, in turn, checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	cmd.u.llq.accel_mode.u.set.enabled_flags =
		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
		BIT(ENA_ADMIN_LIMIT_TX_BURST);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret);

	return ret;
}

static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	struct ena_admin_accel_mode_get llq_accel_mode_get;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n",
			    supported_feat);
		return -EINVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n",
					    supported_feat);
				return -EINVAL;
			}

			ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
				    llq_default_cfg->llq_stride_ctrl,
				    supported_feat,
				    llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
				    supported_feat);
			return -EINVAL;
		}

		ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_ring_entry_size,
			    supported_feat,
			    llq_info->desc_list_entry_size);
	}
	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8
		 * This requirement comes from __iowrite64_copy()
		 */
		ena_trc_err(ena_dev, "Illegal entry size %d\n",
			    llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
				    supported_feat);
			return -EINVAL;
		}

		ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_num_decs_before_header,
			    supported_feat,
			    llq_info->descs_num_before_header);
	}
	/* Check for accelerated queue supported */
	llq_accel_mode_get = llq_features->accel_mode.u.get;

	llq_info->disable_meta_caching =
		!!(llq_accel_mode_get.supported_flags &
		   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
		llq_info->max_entries_in_tx_burst =
			llq_accel_mode_get.max_tx_burst_size /
			llq_default_cfg->llq_ring_entry_size_value;

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);

	return rc;
}
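
/* Illustrative sketch (editor's note, hypothetical values): a caller would
 * hand this function a default configuration such as the one below; each
 * field the device does not support is then replaced by one of the fallbacks
 * chosen above:
 *
 *	struct ena_llq_configurations cfg = {
 *		.llq_header_location        = ENA_ADMIN_INLINE_HEADER,
 *		.llq_stride_ctrl            = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY,
 *		.llq_ring_entry_size        = ENA_ADMIN_LIST_ENTRY_SIZE_128B,
 *		.llq_ring_entry_size_value  = 128,
 *		.llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2,
 *	};
 *
 *	rc = ena_com_config_llq_info(ena_dev, llq_features, &cfg);
 */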

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive a MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			ena_trc_err(admin_queue->ena_dev,
				    "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
				    comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			ena_trc_err(admin_queue->ena_dev,
				    "The ena device didn't send a completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);
		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an interrupt
		 * and autopolling mode is enabled. Continuing normal execution in such a case
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads the hardware device register by posting writes
 * and waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	ENA_MIGHT_SLEEP();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err(ena_dev, "Reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}
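
/* Usage sketch (editor's note, mirroring wait_for_reset_state() below): every
 * value obtained through ena_com_reg_bar_read32() must be checked against
 * ENA_MMIO_READ_TIMEOUT before it is trusted:
 *
 *	u32 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 *
 *	if (unlikely(val == ENA_MMIO_READ_TIMEOUT))
 *		return ENA_COM_TIMER_EXPIRED;
 */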

/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_sq->desc_addr.virt_addr,
				      io_sq->desc_addr.phys_addr,
				      io_sq->desc_addr.mem_handle);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		ENA_MEM_FREE(ena_dev->dmadev,
			     io_sq->bounce_buf_ctrl.base_buffer,
			     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, exp = 0;
	ena_time_t timeout_stamp;

	/* Convert timeout from resolution of 100ms to us resolution. */
	timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);

	while (1) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err(ena_dev, "Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		if (ENA_TIME_EXPIRE(timeout_stamp))
			return ENA_COM_TIMER_EXPIRED;

		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
	}
}
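
/* Editor's note: the timeout argument above is in units of 100 ms, so e.g.
 * timeout = 30 yields 30 * 100 * 1000 us = 3 s of polling before
 * ENA_COM_TIMER_EXPIRED is returned.
 */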

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
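
/* Illustrative example (editor's note): once ena_dev->supported_features has
 * been filled in from the device, a feature such as ENA_ADMIN_LLQ is reported
 * as supported when its bit is set:
 *
 *	bool has_llq = ena_com_check_supported_feature_id(ena_dev,
 *							  ENA_ADMIN_LLQ);
 */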

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size,
				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id,
			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0,
				      feature_ver);
}
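
/* Usage sketch (editor's note; this is the pattern used by
 * ena_com_set_aenq_config() below):
 *
 *	struct ena_admin_get_feat_resp get_resp;
 *	int rc;
 *
 *	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
 *	if (rc)
 *		return rc;
 *	// get_resp.u.aenq.supported_groups is now valid.
 */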

int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
{
	return ena_dev->rss.hash_func;
}

static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		(ena_dev->rss).hash_key;

	ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
	/* The key buffer is stored in the device as an array of
	 * uint32 elements.
	 */
	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
		return ENA_COM_UNSUPPORTED;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);

	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d, while min is: %d and max is: %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			     tbl_size,
			     rss->rss_ind_tbl,
			     rss->rss_ind_tbl_dma_addr,
			     rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev,
			     rss->host_rss_ind_tbl,
			     ((1ULL << rss->tbl_log_size) * sizeof(u16)));
	rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err(ena_dev, "Memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

	if (unlikely(!intr_delay_resolution)) {
		ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}

	/* update Rx */
	ena_dev->intr_moder_rx_interval =
		ena_dev->intr_moder_rx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval =
		ena_dev->intr_moder_tx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_delay_resolution = intr_delay_resolution;
}
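
/* Worked example (editor's note): if intr_moder_rx_interval was programmed
 * as 64 units while intr_delay_resolution was 1 us and the device now reports
 * a resolution of 4 us, the interval is rescaled to 64 * 1 / 4 = 16 units, so
 * the absolute delay of 64 us is preserved.
 */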

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
			ena_trc_dbg(admin_queue->ena_dev,
				    "Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));
		else
			ena_trc_err(admin_queue->ena_dev,
				    "Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));

		return (int)PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err(admin_queue->ena_dev,
				    "Failed to process command. ret = %d\n", ret);
		else
			ena_trc_dbg(admin_queue->ena_dev,
				    "Failed to process command. ret = %d\n", ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;
	u32 exp = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		ena_trc_info(ena_dev, "Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	u32 width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

1669 	/* Make sure the ENA version and the controller version are at least
1670 	 * the versions the driver expects
1671 	 */
1672 	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1673 	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1674 					  ENA_REGS_CONTROLLER_VERSION_OFF);
1675 
1676 	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1677 		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1678 		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1679 		return ENA_COM_TIMER_EXPIRED;
1680 	}
1681 
1682 	ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
1683 		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1684 		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1685 		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1686 
1687 	ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
1688 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
1689 		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1690 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
1691 		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1692 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1693 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1694 		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1695 
1696 	ctrl_ver_masked =
1697 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1698 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1699 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1700 
1701 	/* Validate the ctrl version without the implementation ID */
1702 	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1703 		ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1704 		return -1;
1705 	}
1706 
1707 	return 0;
1708 }
1709 
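/*
 * Worked example for the masked comparison above (shift names abbreviated):
 * a controller reporting version 1.2.3 packs into ctrl_ver_masked as
 * (1 << MAJOR_SHIFT) | (2 << MINOR_SHIFT) | 3, with the implementation ID
 * bits cleared, and is accepted whenever that packed value is at least
 * MIN_ENA_CTRL_VER.
 */
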
1710 static void
1711 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1712 				      struct ena_com_admin_queue *admin_queue)
1714 {
1715 	if (!admin_queue->comp_ctx)
1716 		return;
1717 
1718 	ENA_WAIT_EVENTS_DESTROY(admin_queue);
1719 	ENA_MEM_FREE(ena_dev->dmadev,
1720 		     admin_queue->comp_ctx,
1721 		     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
1722 
1723 	admin_queue->comp_ctx = NULL;
1724 }
1725 
1726 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1727 {
1728 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1729 	struct ena_com_admin_cq *cq = &admin_queue->cq;
1730 	struct ena_com_admin_sq *sq = &admin_queue->sq;
1731 	struct ena_com_aenq *aenq = &ena_dev->aenq;
1732 	u16 size;
1733 
1734 	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1735 
1736 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1737 	if (sq->entries)
1738 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1739 				      sq->dma_addr, sq->mem_handle);
1740 	sq->entries = NULL;
1741 
1742 	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1743 	if (cq->entries)
1744 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1745 				      cq->dma_addr, cq->mem_handle);
1746 	cq->entries = NULL;
1747 
1748 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
1749 	if (ena_dev->aenq.entries)
1750 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1751 				      aenq->dma_addr, aenq->mem_handle);
1752 	aenq->entries = NULL;
1753 	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
1754 }
1755 
1756 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1757 {
1758 	u32 mask_value = 0;
1759 
1760 	if (polling)
1761 		mask_value = ENA_REGS_ADMIN_INTR_MASK;
1762 
1763 	ENA_REG_WRITE32(ena_dev->bus, mask_value,
1764 			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1765 	ena_dev->admin_queue.polling = polling;
1766 }
1767 
1768 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
1769 {
1770 	return ena_dev->admin_queue.polling;
1771 }
1772 
1773 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1774 					 bool polling)
1775 {
1776 	ena_dev->admin_queue.auto_polling = polling;
1777 }
1778 
1779 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1780 {
1781 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1782 
1783 	ENA_SPINLOCK_INIT(mmio_read->lock);
1784 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1785 			       sizeof(*mmio_read->read_resp),
1786 			       mmio_read->read_resp,
1787 			       mmio_read->read_resp_dma_addr,
1788 			       mmio_read->read_resp_mem_handle);
1789 	if (unlikely(!mmio_read->read_resp))
1790 		goto err;
1791 
1792 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1793 
1794 	mmio_read->read_resp->req_id = 0x0;
1795 	mmio_read->seq_num = 0x0;
1796 	mmio_read->readless_supported = true;
1797 
1798 	return 0;
1799 
1800 err:
1801 	ENA_SPINLOCK_DESTROY(mmio_read->lock);
1802 	return ENA_COM_NO_MEM;
1803 }
1804 
1805 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1806 {
1807 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1808 
1809 	mmio_read->readless_supported = readless_supported;
1810 }
1811 
1812 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1813 {
1814 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1815 
1816 	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1817 	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1818 
1819 	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1820 			      sizeof(*mmio_read->read_resp),
1821 			      mmio_read->read_resp,
1822 			      mmio_read->read_resp_dma_addr,
1823 			      mmio_read->read_resp_mem_handle);
1824 
1825 	mmio_read->read_resp = NULL;
1826 	ENA_SPINLOCK_DESTROY(mmio_read->lock);
1827 }
1828 
1829 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1830 {
1831 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1832 	u32 addr_low, addr_high;
1833 
1834 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1835 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1836 
1837 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1838 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1839 }
1840 
1841 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1842 		       struct ena_aenq_handlers *aenq_handlers)
1843 {
1844 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1845 	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1846 	int ret;
1847 
1848 	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1849 
1850 	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1851 		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1852 		return ENA_COM_TIMER_EXPIRED;
1853 	}
1854 
1855 	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1856 		ena_trc_err(ena_dev, "Device isn't ready, abort com init\n");
1857 		return ENA_COM_NO_DEVICE;
1858 	}
1859 
1860 	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1861 
1862 	admin_queue->bus = ena_dev->bus;
1863 	admin_queue->q_dmadev = ena_dev->dmadev;
1864 	admin_queue->polling = false;
1865 	admin_queue->curr_cmd_id = 0;
1866 
1867 	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
1868 
1869 	ENA_SPINLOCK_INIT(admin_queue->q_lock);
1870 
1871 	ret = ena_com_init_comp_ctxt(admin_queue);
1872 	if (ret)
1873 		goto error;
1874 
1875 	ret = ena_com_admin_init_sq(admin_queue);
1876 	if (ret)
1877 		goto error;
1878 
1879 	ret = ena_com_admin_init_cq(admin_queue);
1880 	if (ret)
1881 		goto error;
1882 
1883 	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1884 		ENA_REGS_AQ_DB_OFF);
1885 
1886 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1887 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1888 
1889 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1890 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1891 
1892 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1893 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1894 
1895 	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1896 	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1897 
1898 	aq_caps = 0;
1899 	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1900 	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1901 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1902 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1903 
1904 	acq_caps = 0;
1905 	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1906 	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1907 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1908 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1909 
1910 	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1911 	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1912 	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1913 	if (ret)
1914 		goto error;
1915 
1916 	admin_queue->ena_dev = ena_dev;
1917 	admin_queue->running_state = true;
1918 
1919 	return 0;
1920 error:
1921 	ena_com_admin_destroy(ena_dev);
1922 
1923 	return ret;
1924 }
1925 
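/*
 * Minimal bring-up sketch (error handling elided; 'handlers' stands for a
 * driver-provided struct ena_aenq_handlers). The MMIO read machinery must
 * exist before ena_com_admin_init() performs its first register read, and
 * teardown runs in reverse order:
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	rc = ena_com_validate_version(ena_dev);
 *	rc = ena_com_admin_init(ena_dev, &handlers);
 *	...
 *	ena_com_admin_destroy(ena_dev);
 *	ena_com_mmio_reg_read_request_destroy(ena_dev);
 */
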
1926 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1927 			    struct ena_com_create_io_ctx *ctx)
1928 {
1929 	struct ena_com_io_sq *io_sq;
1930 	struct ena_com_io_cq *io_cq;
1931 	int ret;
1932 
1933 	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1934 		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
1935 			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
1936 		return ENA_COM_INVAL;
1937 	}
1938 
1939 	io_sq = &ena_dev->io_sq_queues[ctx->qid];
1940 	io_cq = &ena_dev->io_cq_queues[ctx->qid];
1941 
1942 	memset(io_sq, 0x0, sizeof(*io_sq));
1943 	memset(io_cq, 0x0, sizeof(*io_cq));
1944 
1945 	/* Init CQ */
1946 	io_cq->q_depth = ctx->queue_size;
1947 	io_cq->direction = ctx->direction;
1948 	io_cq->qid = ctx->qid;
1949 
1950 	io_cq->msix_vector = ctx->msix_vector;
1951 
1952 	io_sq->q_depth = ctx->queue_size;
1953 	io_sq->direction = ctx->direction;
1954 	io_sq->qid = ctx->qid;
1955 
1956 	io_sq->mem_queue_type = ctx->mem_queue_type;
1957 
1958 	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1959 		/* header length is limited to 8 bits */
1960 		io_sq->tx_max_header_size =
1961 			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
1962 
1963 	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1964 	if (ret)
1965 		goto error;
1966 	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1967 	if (ret)
1968 		goto error;
1969 
1970 	ret = ena_com_create_io_cq(ena_dev, io_cq);
1971 	if (ret)
1972 		goto error;
1973 
1974 	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1975 	if (ret)
1976 		goto destroy_io_cq;
1977 
1978 	return 0;
1979 
1980 destroy_io_cq:
1981 	ena_com_destroy_io_cq(ena_dev, io_cq);
1982 error:
1983 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1984 	return ret;
1985 }
1986 
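/*
 * Illustrative caller sketch: one create-context per queue pair, torn down
 * later with ena_com_destroy_io_queue(). All values below are placeholders
 * (a 1024-entry host-memory TX queue on qid 0):
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid		= 0,
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.msix_vector	= 0,
 *		.queue_size	= 1024,
 *	};
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 */
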
1987 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1988 {
1989 	struct ena_com_io_sq *io_sq;
1990 	struct ena_com_io_cq *io_cq;
1991 
1992 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1993 		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
1994 			    qid, ENA_TOTAL_NUM_QUEUES);
1995 		return;
1996 	}
1997 
1998 	io_sq = &ena_dev->io_sq_queues[qid];
1999 	io_cq = &ena_dev->io_cq_queues[qid];
2000 
2001 	ena_com_destroy_io_sq(ena_dev, io_sq);
2002 	ena_com_destroy_io_cq(ena_dev, io_cq);
2003 
2004 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
2005 }
2006 
2007 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
2008 			    struct ena_admin_get_feat_resp *resp)
2009 {
2010 	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
2011 }
2012 
2013 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
2014 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
2015 {
2016 	struct ena_admin_get_feat_resp get_resp;
2017 	int rc;
2018 
2019 	rc = ena_com_get_feature(ena_dev, &get_resp,
2020 				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
2021 	if (rc)
2022 		return rc;
2023 
2024 	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
2025 	       sizeof(get_resp.u.dev_attr));
2026 
2027 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
2028 
2029 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2030 		rc = ena_com_get_feature(ena_dev, &get_resp,
2031 					 ENA_ADMIN_MAX_QUEUES_EXT,
2032 					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
2033 		if (rc)
2034 			return rc;
2035 
2036 		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
2037 			return ENA_COM_INVAL;
2038 
2039 		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
2040 		       sizeof(get_resp.u.max_queue_ext));
2041 		ena_dev->tx_max_header_size =
2042 			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
2043 	} else {
2044 		rc = ena_com_get_feature(ena_dev, &get_resp,
2045 					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
2046 		if (rc)
2047 			return rc;
2048 
2049 		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
2050 		       sizeof(get_resp.u.max_queue));
2051 		ena_dev->tx_max_header_size =
2052 			get_resp.u.max_queue.max_header_size;
2053 	}
2054 
2055 	rc = ena_com_get_feature(ena_dev, &get_resp,
2056 				 ENA_ADMIN_AENQ_CONFIG, 0);
2057 	if (rc)
2058 		return rc;
2059 
2060 	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2061 	       sizeof(get_resp.u.aenq));
2062 
2063 	rc = ena_com_get_feature(ena_dev, &get_resp,
2064 				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2065 	if (rc)
2066 		return rc;
2067 
2068 	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2069 	       sizeof(get_resp.u.offload));
2070 
2071 	/* Driver hints isn't a mandatory admin command, so if the command
2072 	 * isn't supported, set the driver hints to 0
2073 	 */
2074 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2075 
2076 	if (!rc)
2077 		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2078 		       sizeof(get_resp.u.hw_hints));
2079 	else if (rc == ENA_COM_UNSUPPORTED)
2080 		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
2081 	else
2082 		return rc;
2083 
2084 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2085 	if (!rc)
2086 		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2087 		       sizeof(get_resp.u.llq));
2088 	else if (rc == ENA_COM_UNSUPPORTED)
2089 		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2090 	else
2091 		return rc;
2092 
2093 	return 0;
2094 }
2095 
2096 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2097 {
2098 	ena_com_handle_admin_completion(&ena_dev->admin_queue);
2099 }
2100 
2101 /* ena_com_get_specific_aenq_cb:
2102  * return the handler that is relevant to the specific event group
2103  */
2104 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2105 						     u16 group)
2106 {
2107 	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2108 
2109 	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2110 		return aenq_handlers->handlers[group];
2111 
2112 	return aenq_handlers->unimplemented_handler;
2113 }
2114 
2115 /* ena_com_aenq_intr_handler:
2116  * Handles incoming AENQ events: pops events from the queue and applies
2117  * the handler registered for each event's group.
2118  */
2119 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2120 {
2121 	struct ena_admin_aenq_entry *aenq_e;
2122 	struct ena_admin_aenq_common_desc *aenq_common;
2123 	struct ena_com_aenq *aenq = &ena_dev->aenq;
2124 	u64 timestamp;
2125 	ena_aenq_handler handler_cb;
2126 	u16 masked_head, processed = 0;
2127 	u8 phase;
2128 
2129 	masked_head = aenq->head & (aenq->q_depth - 1);
2130 	phase = aenq->phase;
2131 	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2132 	aenq_common = &aenq_e->aenq_common_desc;
2133 
2134 	/* Go over all the events */
2135 	while ((READ_ONCE8(aenq_common->flags) &
2136 		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2137 		/* Make sure the phase bit (ownership) is as expected before
2138 		 * reading the rest of the descriptor.
2139 		 */
2140 		dma_rmb();
2141 
2142 		timestamp = (u64)aenq_common->timestamp_low |
2143 			((u64)aenq_common->timestamp_high << 32);
2144 
2145 		ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
2146 			    aenq_common->group,
2147 			    aenq_common->syndrome,
2148 			    timestamp);
2149 
2150 		/* Handle specific event */
2151 		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2152 							  aenq_common->group);
2153 		handler_cb(data, aenq_e); /* call the actual event handler */
2154 
2155 		/* Get next event entry */
2156 		masked_head++;
2157 		processed++;
2158 
2159 		if (unlikely(masked_head == aenq->q_depth)) {
2160 			masked_head = 0;
2161 			phase = !phase;
2162 		}
2163 		aenq_e = &aenq->entries[masked_head];
2164 		aenq_common = &aenq_e->aenq_common_desc;
2165 	}
2166 
2167 	aenq->head += processed;
2168 	aenq->phase = phase;
2169 
2170 	/* Don't update aenq doorbell if there weren't any processed events */
2171 	if (!processed)
2172 		return;
2173 
2174 	/* write the aenq doorbell after all AENQ descriptors were read */
2175 	mb();
2176 	ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
2177 				ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2178 	mmiowb();
2179 }
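
/*
 * Phase-bit walkthrough for the loop above: with, say, q_depth = 32 the
 * device writes its first pass of entries with phase 1. Once the driver
 * consumes entry 31, masked_head wraps to 0 and the expected phase flips
 * to 0, so the stale phase-1 entries from the previous pass no longer
 * match and the loop stops exactly at the device's write position.
 */
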
2180 #ifdef ENA_EXTENDED_STATS
2181 /*
2182  * ena_com_extended_stats_set_func_queue:
2183  * Set the function index and queue index to be used by the
2184  * get-full-statistics feature.
2185  */
2186 int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
2187 					  u32 func_queue)
2188 {
2190 	/* The function and queue arrive from the user in one u32:
2191 	 * bottom half-word:	function
2192 	 * top half-word:	queue
2193 	 */
2194 	ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
2195 	ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
2196 
2197 	return 0;
2198 }
2199 
2200 #endif /* ENA_EXTENDED_STATS */
2201 
2202 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2203 		      enum ena_regs_reset_reason_types reset_reason)
2204 {
2205 	u32 stat, timeout, cap, reset_val;
2206 	int rc;
2207 
2208 	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2209 	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2210 
2211 	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2212 		     (cap == ENA_MMIO_READ_TIMEOUT))) {
2213 		ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
2214 		return ENA_COM_TIMER_EXPIRED;
2215 	}
2216 
2217 	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2218 		ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
2219 		return ENA_COM_INVAL;
2220 	}
2221 
2222 	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2223 			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2224 	if (timeout == 0) {
2225 		ena_trc_err(ena_dev, "Invalid timeout value\n");
2226 		return ENA_COM_INVAL;
2227 	}
2228 
2229 	/* start reset */
2230 	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2231 	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2232 			ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2233 	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2234 
2235 	/* Write again the MMIO read request address */
2236 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2237 
2238 	rc = wait_for_reset_state(ena_dev, timeout,
2239 				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2240 	if (rc != 0) {
2241 		ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
2242 		return rc;
2243 	}
2244 
2245 	/* reset done */
2246 	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2247 	rc = wait_for_reset_state(ena_dev, timeout, 0);
2248 	if (rc != 0) {
2249 		ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
2250 		return rc;
2251 	}
2252 
2253 	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2254 		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2255 	if (timeout)
2256 		/* the resolution of timeout reg is 100ms */
2257 		ena_dev->admin_queue.completion_timeout = timeout * 100000;
2258 	else
2259 		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2260 
2261 	return 0;
2262 }
2263 
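/*
 * Worked example for the timeout arithmetic above: the capability register
 * expresses the admin completion timeout in 100 ms units, so a field value
 * of 5 yields 5 * 100000 us = 500 ms, while 0 falls back to the
 * ADMIN_CMD_TIMEOUT_US default.
 */
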
2264 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2265 			     struct ena_com_stats_ctx *ctx,
2266 			     enum ena_admin_get_stats_type type)
2267 {
2268 	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2269 	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2270 	struct ena_com_admin_queue *admin_queue;
2271 	int ret;
2272 
2273 	admin_queue = &ena_dev->admin_queue;
2274 
2275 	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2276 	get_cmd->aq_common_descriptor.flags = 0;
2277 	get_cmd->type = type;
2278 
2279 	ret = ena_com_execute_admin_command(admin_queue,
2280 					    (struct ena_admin_aq_entry *)get_cmd,
2281 					    sizeof(*get_cmd),
2282 					    (struct ena_admin_acq_entry *)get_resp,
2283 					    sizeof(*get_resp));
2284 
2285 	if (unlikely(ret))
2286 		ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);
2287 
2288 	return ret;
2289 }
2290 
2291 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2292 			  struct ena_admin_eni_stats *stats)
2293 {
2294 	struct ena_com_stats_ctx ctx;
2295 	int ret;
2296 
2297 	memset(&ctx, 0x0, sizeof(ctx));
2298 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2299 	if (likely(ret == 0))
2300 		memcpy(stats, &ctx.get_resp.u.eni_stats,
2301 		       sizeof(ctx.get_resp.u.eni_stats));
2302 
2303 	return ret;
2304 }
2305 
2306 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2307 				struct ena_admin_basic_stats *stats)
2308 {
2309 	struct ena_com_stats_ctx ctx;
2310 	int ret;
2311 
2312 	memset(&ctx, 0x0, sizeof(ctx));
2313 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2314 	if (likely(ret == 0))
2315 		memcpy(stats, &ctx.get_resp.u.basic_stats,
2316 		       sizeof(ctx.get_resp.u.basic_stats));
2317 
2318 	return ret;
2319 }
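
/*
 * Illustrative usage (sketch): the caller owns the stats struct, and it is
 * only valid when the helper returns 0:
 *
 *	struct ena_admin_basic_stats stats;
 *	rc = ena_com_get_dev_basic_stats(ena_dev, &stats);
 *	if (rc == 0)
 *		... consume the packet/byte/drop counters in 'stats' ...
 */
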
2320 #ifdef ENA_EXTENDED_STATS
2321 
2322 int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
2323 				   u32 len)
2324 {
2325 	struct ena_com_stats_ctx ctx;
2326 	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
2327 	ena_mem_handle_t mem_handle;
2328 	void *virt_addr;
2329 	dma_addr_t phys_addr;
2330 	int ret;
2331 
2332 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
2333 			       virt_addr, phys_addr, mem_handle);
2334 	if (!virt_addr) {
2335 		ret = ENA_COM_NO_MEM;
2336 		goto done;
2337 	}
2338 	memset(&ctx, 0x0, sizeof(ctx));
2339 	ret = ena_com_mem_addr_set(ena_dev,
2340 				   &get_cmd->u.control_buffer.address,
2341 				   phys_addr);
2342 	if (unlikely(ret)) {
2343 		ena_trc_err(ena_dev, "Memory address set failed\n");
2344 		goto free_ext_stats_mem;
2345 	}
2346 	get_cmd->u.control_buffer.length = len;
2347 
2348 	get_cmd->device_id = ena_dev->stats_func;
2349 	get_cmd->queue_idx = ena_dev->stats_queue;
2350 
2351 	ret = ena_get_dev_stats(ena_dev, &ctx,
2352 				ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
2353 	if (ret < 0)
2354 		goto free_ext_stats_mem;
2355 
2356 	ret = snprintf(buff, len, "%s", (char *)virt_addr);
2357 
2358 free_ext_stats_mem:
2359 	ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
2360 			      mem_handle);
2361 done:
2362 	return ret;
2363 }
2364 #endif
2365 
2366 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2367 {
2368 	struct ena_com_admin_queue *admin_queue;
2369 	struct ena_admin_set_feat_cmd cmd;
2370 	struct ena_admin_set_feat_resp resp;
2371 	int ret;
2372 
2373 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2374 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
2375 		return ENA_COM_UNSUPPORTED;
2376 	}
2377 
2378 	memset(&cmd, 0x0, sizeof(cmd));
2379 	admin_queue = &ena_dev->admin_queue;
2380 
2381 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2382 	cmd.aq_common_descriptor.flags = 0;
2383 	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2384 	cmd.u.mtu.mtu = (u32)mtu;
2385 
2386 	ret = ena_com_execute_admin_command(admin_queue,
2387 					    (struct ena_admin_aq_entry *)&cmd,
2388 					    sizeof(cmd),
2389 					    (struct ena_admin_acq_entry *)&resp,
2390 					    sizeof(resp));
2391 
2392 	if (unlikely(ret))
2393 		ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret);
2394 
2395 	return ret;
2396 }
2397 
2398 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2399 				 struct ena_admin_feature_offload_desc *offload)
2400 {
2401 	int ret;
2402 	struct ena_admin_get_feat_resp resp;
2403 
2404 	ret = ena_com_get_feature(ena_dev, &resp,
2405 				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2406 	if (unlikely(ret)) {
2407 		ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret);
2408 		return ret;
2409 	}
2410 
2411 	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2412 
2413 	return 0;
2414 }
2415 
2416 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2417 {
2418 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2419 	struct ena_rss *rss = &ena_dev->rss;
2420 	struct ena_admin_set_feat_cmd cmd;
2421 	struct ena_admin_set_feat_resp resp;
2422 	struct ena_admin_get_feat_resp get_resp;
2423 	int ret;
2424 
2425 	if (!ena_com_check_supported_feature_id(ena_dev,
2426 						ENA_ADMIN_RSS_HASH_FUNCTION)) {
2427 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
2428 			    ENA_ADMIN_RSS_HASH_FUNCTION);
2429 		return ENA_COM_UNSUPPORTED;
2430 	}
2431 
2432 	/* Validate hash function is supported */
2433 	ret = ena_com_get_feature(ena_dev, &get_resp,
2434 				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2435 	if (unlikely(ret))
2436 		return ret;
2437 
2438 	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2439 		ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n",
2440 			    rss->hash_func);
2441 		return ENA_COM_UNSUPPORTED;
2442 	}
2443 
2444 	memset(&cmd, 0x0, sizeof(cmd));
2445 
2446 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2447 	cmd.aq_common_descriptor.flags =
2448 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2449 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2450 	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2451 	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2452 
2453 	ret = ena_com_mem_addr_set(ena_dev,
2454 				   &cmd.control_buffer.address,
2455 				   rss->hash_key_dma_addr);
2456 	if (unlikely(ret)) {
2457 		ena_trc_err(ena_dev, "Memory address set failed\n");
2458 		return ret;
2459 	}
2460 
2461 	cmd.control_buffer.length = sizeof(*rss->hash_key);
2462 
2463 	ret = ena_com_execute_admin_command(admin_queue,
2464 					    (struct ena_admin_aq_entry *)&cmd,
2465 					    sizeof(cmd),
2466 					    (struct ena_admin_acq_entry *)&resp,
2467 					    sizeof(resp));
2468 	if (unlikely(ret)) {
2469 		ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n",
2470 			    rss->hash_func, ret);
2471 		return ENA_COM_INVAL;
2472 	}
2473 
2474 	return 0;
2475 }
2476 
2477 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2478 			       enum ena_admin_hash_functions func,
2479 			       const u8 *key, u16 key_len, u32 init_val)
2480 {
2481 	struct ena_admin_feature_rss_flow_hash_control *hash_key;
2482 	struct ena_admin_get_feat_resp get_resp;
2483 	enum ena_admin_hash_functions old_func;
2484 	struct ena_rss *rss = &ena_dev->rss;
2485 	int rc;
2486 
2487 	hash_key = rss->hash_key;
2488 
2489 	/* Make sure the key size is a multiple of DWORDs */
2490 	if (unlikely(key_len & 0x3))
2491 		return ENA_COM_INVAL;
2492 
2493 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2494 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2495 				    rss->hash_key_dma_addr,
2496 				    sizeof(*rss->hash_key), 0);
2497 	if (unlikely(rc))
2498 		return rc;
2499 
2500 	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2501 		ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func);
2502 		return ENA_COM_UNSUPPORTED;
2503 	}
2504 
2505 	switch (func) {
2506 	case ENA_ADMIN_TOEPLITZ:
2507 		if (key) {
2508 			if (key_len != sizeof(hash_key->key)) {
2509 				ena_trc_err(ena_dev, "key len (%hu) doesn't equal the supported size (%zu)\n",
2510 					     key_len, sizeof(hash_key->key));
2511 				return ENA_COM_INVAL;
2512 			}
2513 			memcpy(hash_key->key, key, key_len);
2514 			rss->hash_init_val = init_val;
2515 			hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2516 		}
2517 		break;
2518 	case ENA_ADMIN_CRC32:
2519 		rss->hash_init_val = init_val;
2520 		break;
2521 	default:
2522 		ena_trc_err(ena_dev, "Invalid hash function (%d)\n", func);
2523 		return ENA_COM_INVAL;
2524 	}
2525 
2526 	old_func = rss->hash_func;
2527 	rss->hash_func = func;
2528 	rc = ena_com_set_hash_function(ena_dev);
2529 
2530 	/* Restore the old function */
2531 	if (unlikely(rc))
2532 		rss->hash_func = old_func;
2533 
2534 	return rc;
2535 }
2536 
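/*
 * Illustrative usage (sketch): switching to Toeplitz with a caller-supplied
 * key. 'toeplitz_key' is a placeholder buffer whose length must be a
 * multiple of 4 and exactly sizeof(hash_key->key), per the checks above;
 * the final argument is the hash seed:
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					toeplitz_key, sizeof(toeplitz_key),
 *					0);
 */
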
2537 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2538 			      enum ena_admin_hash_functions *func)
2539 {
2540 	struct ena_rss *rss = &ena_dev->rss;
2541 	struct ena_admin_get_feat_resp get_resp;
2542 	int rc;
2543 
2544 	if (unlikely(!func))
2545 		return ENA_COM_INVAL;
2546 
2547 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2548 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2549 				    rss->hash_key_dma_addr,
2550 				    sizeof(*rss->hash_key), 0);
2551 	if (unlikely(rc))
2552 		return rc;
2553 
2554 	/* ENA_FFS() returns 1 in case the lsb is set */
2555 	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
2556 	if (rss->hash_func)
2557 		rss->hash_func--;
2558 
2559 	*func = rss->hash_func;
2560 
2561 	return 0;
2562 }
2563 
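/*
 * Worked example of the ENA_FFS() decode above: a device reporting
 * selected_func = 0x4 (bit 2 set) gives ENA_FFS() = 3, so the stored
 * hash_func becomes 2; selected_func = 0 leaves hash_func at 0.
 */
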
2564 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2565 {
2566 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
2567 		ena_dev->rss.hash_key;
2568 
2569 	if (key)
2570 		memcpy(key, hash_key->key,
2571 		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2572 
2573 	return 0;
2574 }
2575 
2576 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2577 			  enum ena_admin_flow_hash_proto proto,
2578 			  u16 *fields)
2579 {
2580 	struct ena_rss *rss = &ena_dev->rss;
2581 	struct ena_admin_get_feat_resp get_resp;
2582 	int rc;
2583 
2584 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2585 				    ENA_ADMIN_RSS_HASH_INPUT,
2586 				    rss->hash_ctrl_dma_addr,
2587 				    sizeof(*rss->hash_ctrl), 0);
2588 	if (unlikely(rc))
2589 		return rc;
2590 
2591 	if (fields)
2592 		*fields = rss->hash_ctrl->selected_fields[proto].fields;
2593 
2594 	return 0;
2595 }
2596 
2597 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2598 {
2599 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2600 	struct ena_rss *rss = &ena_dev->rss;
2601 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2602 	struct ena_admin_set_feat_cmd cmd;
2603 	struct ena_admin_set_feat_resp resp;
2604 	int ret;
2605 
2606 	if (!ena_com_check_supported_feature_id(ena_dev,
2607 						ENA_ADMIN_RSS_HASH_INPUT)) {
2608 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
2609 			    ENA_ADMIN_RSS_HASH_INPUT);
2610 		return ENA_COM_UNSUPPORTED;
2611 	}
2612 
2613 	memset(&cmd, 0x0, sizeof(cmd));
2614 
2615 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2616 	cmd.aq_common_descriptor.flags =
2617 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2618 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2619 	cmd.u.flow_hash_input.enabled_input_sort =
2620 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2621 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2622 
2623 	ret = ena_com_mem_addr_set(ena_dev,
2624 				   &cmd.control_buffer.address,
2625 				   rss->hash_ctrl_dma_addr);
2626 	if (unlikely(ret)) {
2627 		ena_trc_err(ena_dev, "Memory address set failed\n");
2628 		return ret;
2629 	}
2630 	cmd.control_buffer.length = sizeof(*hash_ctrl);
2631 
2632 	ret = ena_com_execute_admin_command(admin_queue,
2633 					    (struct ena_admin_aq_entry *)&cmd,
2634 					    sizeof(cmd),
2635 					    (struct ena_admin_acq_entry *)&resp,
2636 					    sizeof(resp));
2637 	if (unlikely(ret))
2638 		ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret);
2639 
2640 	return ret;
2641 }
2642 
2643 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2644 {
2645 	struct ena_rss *rss = &ena_dev->rss;
2646 	struct ena_admin_feature_rss_hash_control *hash_ctrl =
2647 		rss->hash_ctrl;
2648 	u16 available_fields = 0;
2649 	int rc, i;
2650 
2651 	/* Get the supported hash input */
2652 	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2653 	if (unlikely(rc))
2654 		return rc;
2655 
2656 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2657 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2658 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2659 
2660 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2661 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2662 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2663 
2664 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2665 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2666 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2667 
2668 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2669 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2670 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2671 
2672 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2673 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2674 
2675 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2676 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2677 
2678 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2679 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2680 
2681 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2682 		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2683 
2684 	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2685 		available_fields = hash_ctrl->selected_fields[i].fields &
2686 				hash_ctrl->supported_fields[i].fields;
2687 		if (available_fields != hash_ctrl->selected_fields[i].fields) {
2688 			ena_trc_err(ena_dev, "Hash control doesn't support the desired configuration. proto %x supported %x selected %x\n",
2689 				    i, hash_ctrl->supported_fields[i].fields,
2690 				    hash_ctrl->selected_fields[i].fields);
2691 			return ENA_COM_UNSUPPORTED;
2692 		}
2693 	}
2694 
2695 	rc = ena_com_set_hash_ctrl(ena_dev);
2696 
2697 	/* In case of failure, restore the old hash ctrl */
2698 	if (unlikely(rc))
2699 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2700 
2701 	return rc;
2702 }
2703 
2704 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2705 			   enum ena_admin_flow_hash_proto proto,
2706 			   u16 hash_fields)
2707 {
2708 	struct ena_rss *rss = &ena_dev->rss;
2709 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2710 	u16 supported_fields;
2711 	int rc;
2712 
2713 	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2714 		ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
2715 		return ENA_COM_INVAL;
2716 	}
2717 
2718 	/* Get the ctrl table */
2719 	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2720 	if (unlikely(rc))
2721 		return rc;
2722 
2723 	/* Make sure all the fields are supported */
2724 	supported_fields = hash_ctrl->supported_fields[proto].fields;
2725 	if ((hash_fields & supported_fields) != hash_fields) {
2726 		ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
2727 			    proto, hash_fields, supported_fields);
2728 	}
2729 
2730 	hash_ctrl->selected_fields[proto].fields = hash_fields;
2731 
2732 	rc = ena_com_set_hash_ctrl(ena_dev);
2733 
2734 	/* In case of failure, restore the old hash ctrl */
2735 	if (unlikely(rc))
2736 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2737 
2738 	return rc;
2739 }
2740 
2741 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2742 				      u16 entry_idx, u16 entry_value)
2743 {
2744 	struct ena_rss *rss = &ena_dev->rss;
2745 
2746 	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2747 		return ENA_COM_INVAL;
2748 
2749 	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2750 	if (unlikely(entry_value >= ENA_TOTAL_NUM_QUEUES))
2751 
2752 	rss->host_rss_ind_tbl[entry_idx] = entry_value;
2753 
2754 	return 0;
2755 }
2756 
2757 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2758 {
2759 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2760 	struct ena_rss *rss = &ena_dev->rss;
2761 	struct ena_admin_set_feat_cmd cmd;
2762 	struct ena_admin_set_feat_resp resp;
2763 	int ret;
2764 
2765 	if (!ena_com_check_supported_feature_id(ena_dev,
2766 						ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
2767 		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
2768 			    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
2769 		return ENA_COM_UNSUPPORTED;
2770 	}
2771 
2772 	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2773 	if (ret) {
2774 		ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n");
2775 		return ret;
2776 	}
2777 
2778 	memset(&cmd, 0x0, sizeof(cmd));
2779 
2780 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2781 	cmd.aq_common_descriptor.flags =
2782 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2783 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
2784 	cmd.u.ind_table.size = rss->tbl_log_size;
2785 	cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2786 
2787 	ret = ena_com_mem_addr_set(ena_dev,
2788 				   &cmd.control_buffer.address,
2789 				   rss->rss_ind_tbl_dma_addr);
2790 	if (unlikely(ret)) {
2791 		ena_trc_err(ena_dev, "Memory address set failed\n");
2792 		return ret;
2793 	}
2794 
2795 	cmd.control_buffer.length = (u32)(1ULL << rss->tbl_log_size) *
2796 		sizeof(struct ena_admin_rss_ind_table_entry);
2797 
2798 	ret = ena_com_execute_admin_command(admin_queue,
2799 					    (struct ena_admin_aq_entry *)&cmd,
2800 					    sizeof(cmd),
2801 					    (struct ena_admin_acq_entry *)&resp,
2802 					    sizeof(resp));
2803 
2804 	if (unlikely(ret))
2805 		ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret);
2806 
2807 	return ret;
2808 }
2809 
2810 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2811 {
2812 	struct ena_rss *rss = &ena_dev->rss;
2813 	struct ena_admin_get_feat_resp get_resp;
2814 	u32 tbl_size;
2815 	int i, rc;
2816 
2817 	tbl_size = (u32)(1ULL << rss->tbl_log_size) *
2818 		sizeof(struct ena_admin_rss_ind_table_entry);
2819 
2820 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2821 				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
2822 				    rss->rss_ind_tbl_dma_addr,
2823 				    tbl_size, 0);
2824 	if (unlikely(rc))
2825 		return rc;
2826 
2827 	if (!ind_tbl)
2828 		return 0;
2829 
2830 	for (i = 0; i < (1 << rss->tbl_log_size); i++)
2831 		ind_tbl[i] = rss->host_rss_ind_tbl[i];
2832 
2833 	return 0;
2834 }
2835 
2836 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2837 {
2838 	int rc;
2839 
2840 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2841 
2842 	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2843 	if (unlikely(rc))
2844 		goto err_indr_tbl;
2845 
2846 	/* The following function might return unsupported if the device
2847 	 * doesn't support setting the key / hash function. We can safely
2848 	 * ignore this error and fall back to indirection-table support only.
2849 	 */
2850 	rc = ena_com_hash_key_allocate(ena_dev);
2851 	if (likely(!rc))
2852 		ena_com_hash_key_fill_default_key(ena_dev);
2853 	else if (rc != ENA_COM_UNSUPPORTED)
2854 		goto err_hash_key;
2855 
2856 	rc = ena_com_hash_ctrl_init(ena_dev);
2857 	if (unlikely(rc))
2858 		goto err_hash_ctrl;
2859 
2860 	return 0;
2861 
2862 err_hash_ctrl:
2863 	ena_com_hash_key_destroy(ena_dev);
2864 err_hash_key:
2865 	ena_com_indirect_table_destroy(ena_dev);
2866 err_indr_tbl:
2867 
2868 	return rc;
2869 }
2870 
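/*
 * Typical RSS setup sketch (error handling elided; 'num_queues' and the
 * table log-size of 7, i.e. 128 entries, are placeholders): allocate the
 * RSS state, spread the queues across the indirection table, then push the
 * table to the device:
 *
 *	rc = ena_com_rss_init(ena_dev, 7);
 *	for (i = 0; i < (1 << 7); i++)
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *	rc = ena_com_indirect_table_set(ena_dev);
 */
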
2871 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2872 {
2873 	ena_com_indirect_table_destroy(ena_dev);
2874 	ena_com_hash_key_destroy(ena_dev);
2875 	ena_com_hash_ctrl_destroy(ena_dev);
2876 
2877 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2878 }
2879 
2880 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2881 {
2882 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2883 
2884 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2885 			       SZ_4K,
2886 			       host_attr->host_info,
2887 			       host_attr->host_info_dma_addr,
2888 			       host_attr->host_info_dma_handle);
2889 	if (unlikely(!host_attr->host_info))
2890 		return ENA_COM_NO_MEM;
2891 
2892 	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2893 		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2894 		(ENA_COMMON_SPEC_VERSION_MINOR));
2895 
2896 	return 0;
2897 }
2898 
2899 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2900 				u32 debug_area_size)
2901 {
2902 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2903 
2904 	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2905 			       debug_area_size,
2906 			       host_attr->debug_area_virt_addr,
2907 			       host_attr->debug_area_dma_addr,
2908 			       host_attr->debug_area_dma_handle);
2909 	if (unlikely(!host_attr->debug_area_virt_addr)) {
2910 		host_attr->debug_area_size = 0;
2911 		return ENA_COM_NO_MEM;
2912 	}
2913 
2914 	host_attr->debug_area_size = debug_area_size;
2915 
2916 	return 0;
2917 }
2918 
2919 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2920 {
2921 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2922 
2923 	if (host_attr->host_info) {
2924 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2925 				      SZ_4K,
2926 				      host_attr->host_info,
2927 				      host_attr->host_info_dma_addr,
2928 				      host_attr->host_info_dma_handle);
2929 		host_attr->host_info = NULL;
2930 	}
2931 }
2932 
2933 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2934 {
2935 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2936 
2937 	if (host_attr->debug_area_virt_addr) {
2938 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2939 				      host_attr->debug_area_size,
2940 				      host_attr->debug_area_virt_addr,
2941 				      host_attr->debug_area_dma_addr,
2942 				      host_attr->debug_area_dma_handle);
2943 		host_attr->debug_area_virt_addr = NULL;
2944 	}
2945 }
2946 
2947 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2948 {
2949 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2950 	struct ena_com_admin_queue *admin_queue;
2951 	struct ena_admin_set_feat_cmd cmd;
2952 	struct ena_admin_set_feat_resp resp;
2954 	int ret;
2955 
2956 	/* Host attribute config is called before ena_com_get_dev_attr_feat
2957 	 * so ena_com can't check if the feature is supported.
2958 	 */
2959 
2960 	memset(&cmd, 0x0, sizeof(cmd));
2961 	admin_queue = &ena_dev->admin_queue;
2962 
2963 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2964 	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2965 
2966 	ret = ena_com_mem_addr_set(ena_dev,
2967 				   &cmd.u.host_attr.debug_ba,
2968 				   host_attr->debug_area_dma_addr);
2969 	if (unlikely(ret)) {
2970 		ena_trc_err(ena_dev, "Memory address set failed\n");
2971 		return ret;
2972 	}
2973 
2974 	ret = ena_com_mem_addr_set(ena_dev,
2975 				   &cmd.u.host_attr.os_info_ba,
2976 				   host_attr->host_info_dma_addr);
2977 	if (unlikely(ret)) {
2978 		ena_trc_err(ena_dev, "Memory address set failed\n");
2979 		return ret;
2980 	}
2981 
2982 	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2983 
2984 	ret = ena_com_execute_admin_command(admin_queue,
2985 					    (struct ena_admin_aq_entry *)&cmd,
2986 					    sizeof(cmd),
2987 					    (struct ena_admin_acq_entry *)&resp,
2988 					    sizeof(resp));
2989 
2990 	if (unlikely(ret))
2991 		ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret);
2992 
2993 	return ret;
2994 }
2995 
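/*
 * Ordering sketch for the host-attribute helpers above (the debug area
 * size is a placeholder): allocate both buffers first, then publish their
 * addresses to the device with a single set command:
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	rc = ena_com_allocate_debug_area(ena_dev, SZ_4K);
 *	... fill host_attr->host_info with OS/driver details ...
 *	rc = ena_com_set_host_attributes(ena_dev);
 */
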
2996 /* Interrupt moderation */
2997 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2998 {
2999 	return ena_com_check_supported_feature_id(ena_dev,
3000 						  ENA_ADMIN_INTERRUPT_MODERATION);
3001 }
3002 
3003 static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
3004 							  u32 coalesce_usecs,
3005 							  u32 intr_delay_resolution,
3006 							  u32 *intr_moder_interval)
3007 {
3008 	if (!intr_delay_resolution) {
3009 		ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n");
3010 		return ENA_COM_FAULT;
3011 	}
3012 
3013 	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
3014 
3015 	return 0;
3016 }
3017 
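/*
 * Worked example of the conversion above: if the device reports an
 * interrupt delay resolution of 4 us, a requested 64 us coalescing
 * interval is programmed as 64 / 4 = 16 device units; a resolution of 0
 * is rejected with ENA_COM_FAULT before the division.
 */
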
3018 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
3019 						      u32 tx_coalesce_usecs)
3020 {
3021 	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
3022 							      tx_coalesce_usecs,
3023 							      ena_dev->intr_delay_resolution,
3024 							      &ena_dev->intr_moder_tx_interval);
3025 }
3026 
3027 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
3028 						      u32 rx_coalesce_usecs)
3029 {
3030 	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
3031 							      rx_coalesce_usecs,
3032 							      ena_dev->intr_delay_resolution,
3033 							      &ena_dev->intr_moder_rx_interval);
3034 }
3035 
3036 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
3037 {
3038 	struct ena_admin_get_feat_resp get_resp;
3039 	u16 delay_resolution;
3040 	int rc;
3041 
3042 	rc = ena_com_get_feature(ena_dev, &get_resp,
3043 				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
3044 
3045 	if (rc) {
3046 		if (rc == ENA_COM_UNSUPPORTED) {
3047 			ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
3048 				    ENA_ADMIN_INTERRUPT_MODERATION);
3049 			rc = 0;
3050 		} else {
3051 			ena_trc_err(ena_dev,
3052 				    "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
3053 		}
3054 
3055 		/* no moderation supported, disable adaptive support */
3056 		ena_com_disable_adaptive_moderation(ena_dev);
3057 		return rc;
3058 	}
3059 
3060 	/* Moderation is supported; record the device's interrupt delay resolution */
3061 	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
3062 	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
3063 
3064 	/* Disable adaptive moderation by default - can be enabled later */
3065 	ena_com_disable_adaptive_moderation(ena_dev);
3066 
3067 	return 0;
3068 }
3069 
3070 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
3071 {
3072 	return ena_dev->intr_moder_tx_interval;
3073 }
3074 
3075 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
3076 {
3077 	return ena_dev->intr_moder_rx_interval;
3078 }
3079 
3080 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
3081 			    struct ena_admin_feature_llq_desc *llq_features,
3082 			    struct ena_llq_configurations *llq_default_cfg)
3083 {
3084 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
3085 	int rc;
3086 
3087 	if (!llq_features->max_llq_num) {
3088 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3089 		return 0;
3090 	}
3091 
3092 	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
3093 	if (rc)
3094 		return rc;
3095 
3096 	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
3097 		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
3098 
3099 	if (unlikely(ena_dev->tx_max_header_size == 0)) {
3100 		ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n");
3101 		return -EINVAL;
3102 		return ENA_COM_INVAL;
3103 
3104 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
3105 
3106 	return 0;
3107 }
3108
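/*
 * Worked example for the tx_max_header_size computation above, assuming a
 * 16-byte struct ena_eth_io_tx_desc: a 128-byte LLQ descriptor-list entry
 * with 2 descriptors placed before the header leaves 128 - 2 * 16 = 96
 * bytes of packet header that can be pushed inline.
 */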