/linux/drivers/net/ethernet/amazon/ena/ena_com.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
4  */
5 
6 #include "ena_com.h"
7 
8 /*****************************************************************************/
9 /*****************************************************************************/
10 
11 /* Timeout in micro-sec */
12 #define ADMIN_CMD_TIMEOUT_US (3000000)
13 
14 #define ENA_ASYNC_QUEUE_DEPTH 16
15 #define ENA_ADMIN_QUEUE_DEPTH 32
16 
17 
18 #define ENA_CTRL_MAJOR		0
19 #define ENA_CTRL_MINOR		0
20 #define ENA_CTRL_SUB_MINOR	1
21 
22 #define MIN_ENA_CTRL_VER \
23 	(((ENA_CTRL_MAJOR) << \
24 	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
25 	((ENA_CTRL_MINOR) << \
26 	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
27 	(ENA_CTRL_SUB_MINOR))
28 
29 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
30 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
31 
32 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
33 
34 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4
35 
36 #define ENA_REGS_ADMIN_INTR_MASK 1
37 
38 #define ENA_MAX_BACKOFF_DELAY_EXP 16U
39 
40 #define ENA_MIN_ADMIN_POLL_US 100
41 
42 #define ENA_MAX_ADMIN_POLL_US 5000
43 
44 /*****************************************************************************/
45 /*****************************************************************************/
46 /*****************************************************************************/
47 
48 enum ena_cmd_status {
49 	ENA_CMD_SUBMITTED,
50 	ENA_CMD_COMPLETED,
51 	/* Abort - canceled by the driver */
52 	ENA_CMD_ABORTED,
53 };
54 
55 struct ena_comp_ctx {
56 	struct completion wait_event;
57 	struct ena_admin_acq_entry *user_cqe;
58 	u32 comp_size;
59 	enum ena_cmd_status status;
60 	/* status from the device */
61 	u8 comp_status;
62 	u8 cmd_opcode;
63 	bool occupied;
64 };
65 
66 struct ena_com_stats_ctx {
67 	struct ena_admin_aq_get_stats_cmd get_cmd;
68 	struct ena_admin_acq_get_stats_resp get_resp;
69 };
70 
71 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
72 				       struct ena_common_mem_addr *ena_addr,
73 				       dma_addr_t addr)
74 {
75 	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
76 		netdev_err(ena_dev->net_device,
77 			   "DMA address has more bits than the device supports\n");
78 		return -EINVAL;
79 	}
80 
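	/* The common memory address descriptor keeps the upper address bits in a
	 * 16-bit field, hence the cast below; the range check above already
	 * ensured the address fits the device's reported DMA width.
	 */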
81 	ena_addr->mem_addr_low = lower_32_bits(addr);
82 	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
83 
84 	return 0;
85 }
86 
87 static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
88 {
89 	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
90 	struct ena_com_admin_sq *sq = &admin_queue->sq;
91 	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
92 
93 	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
94 
95 	if (!sq->entries) {
96 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
97 		return -ENOMEM;
98 	}
99 
100 	sq->head = 0;
101 	sq->tail = 0;
102 	sq->phase = 1;
103 
104 	sq->db_addr = NULL;
105 
106 	return 0;
107 }
108 
109 static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
110 {
111 	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
112 	struct ena_com_admin_cq *cq = &admin_queue->cq;
113 	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
114 
115 	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
116 
117 	if (!cq->entries) {
118 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
119 		return -ENOMEM;
120 	}
121 
122 	cq->head = 0;
123 	cq->phase = 1;
124 
125 	return 0;
126 }
127 
128 static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
129 				   struct ena_aenq_handlers *aenq_handlers)
130 {
131 	struct ena_com_aenq *aenq = &ena_dev->aenq;
132 	u32 addr_low, addr_high, aenq_caps;
133 	u16 size;
134 
135 	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
136 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
137 	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
138 
139 	if (!aenq->entries) {
140 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
141 		return -ENOMEM;
142 	}
143 
144 	aenq->head = aenq->q_depth;
145 	aenq->phase = 1;
146 
147 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
148 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
149 
150 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
151 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
152 
153 	aenq_caps = 0;
154 	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
155 	aenq_caps |=
156 		(sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
157 		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
158 	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
159 
160 	if (unlikely(!aenq_handlers)) {
161 		netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
162 		return -EINVAL;
163 	}
164 
165 	aenq->aenq_handlers = aenq_handlers;
166 
167 	return 0;
168 }
169 
170 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
171 				     struct ena_comp_ctx *comp_ctx)
172 {
173 	comp_ctx->occupied = false;
174 	atomic_dec(&queue->outstanding_cmds);
175 }
176 
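/* Look up the completion context slot for a given command id. When @capture is
 * true the slot is also marked as occupied and the outstanding command counter
 * is incremented; comp_ctxt_release() undoes both.
 */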
177 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
178 					  u16 command_id, bool capture)
179 {
180 	if (unlikely(command_id >= admin_queue->q_depth)) {
181 		netdev_err(admin_queue->ena_dev->net_device,
182 			   "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
183 			   command_id, admin_queue->q_depth);
184 		return NULL;
185 	}
186 
187 	if (unlikely(!admin_queue->comp_ctx)) {
188 		netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
189 		return NULL;
190 	}
191 
192 	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
193 		netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
194 		return NULL;
195 	}
196 
197 	if (capture) {
198 		atomic_inc(&admin_queue->outstanding_cmds);
199 		admin_queue->comp_ctx[command_id].occupied = true;
200 	}
201 
202 	return &admin_queue->comp_ctx[command_id];
203 }
204 
205 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
206 						       struct ena_admin_aq_entry *cmd,
207 						       size_t cmd_size_in_bytes,
208 						       struct ena_admin_acq_entry *comp,
209 						       size_t comp_size_in_bytes)
210 {
211 	struct ena_comp_ctx *comp_ctx;
212 	u16 tail_masked, cmd_id;
213 	u16 queue_size_mask;
214 	u16 cnt;
215 
216 	queue_size_mask = admin_queue->q_depth - 1;
217 
218 	tail_masked = admin_queue->sq.tail & queue_size_mask;
219 
220 	/* In case of queue FULL */
221 	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
222 	if (cnt >= admin_queue->q_depth) {
223 		netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
224 		admin_queue->stats.out_of_space++;
225 		return ERR_PTR(-ENOSPC);
226 	}
227 
228 	cmd_id = admin_queue->curr_cmd_id;
229 
230 	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
231 		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
232 
233 	cmd->aq_common_descriptor.command_id |= cmd_id &
234 		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
235 
236 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
237 	if (unlikely(!comp_ctx))
238 		return ERR_PTR(-EINVAL);
239 
240 	comp_ctx->status = ENA_CMD_SUBMITTED;
241 	comp_ctx->comp_size = (u32)comp_size_in_bytes;
242 	comp_ctx->user_cqe = comp;
243 	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
244 
245 	reinit_completion(&comp_ctx->wait_event);
246 
247 	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
248 
249 	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
250 		queue_size_mask;
251 
252 	admin_queue->sq.tail++;
253 	admin_queue->stats.submitted_cmd++;
254 
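	/* The submission queue phase bit flips each time the tail wraps around,
	 * so the device can tell freshly written descriptors from stale ones.
	 */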
255 	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
256 		admin_queue->sq.phase = !admin_queue->sq.phase;
257 
258 	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
259 
260 	return comp_ctx;
261 }
262 
263 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
264 {
265 	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
266 	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
267 	struct ena_comp_ctx *comp_ctx;
268 	u16 i;
269 
270 	admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
271 	if (unlikely(!admin_queue->comp_ctx)) {
272 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
273 		return -ENOMEM;
274 	}
275 
276 	for (i = 0; i < admin_queue->q_depth; i++) {
277 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
278 		if (comp_ctx)
279 			init_completion(&comp_ctx->wait_event);
280 	}
281 
282 	return 0;
283 }
284 
285 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
286 						     struct ena_admin_aq_entry *cmd,
287 						     size_t cmd_size_in_bytes,
288 						     struct ena_admin_acq_entry *comp,
289 						     size_t comp_size_in_bytes)
290 {
291 	unsigned long flags = 0;
292 	struct ena_comp_ctx *comp_ctx;
293 
294 	spin_lock_irqsave(&admin_queue->q_lock, flags);
295 	if (unlikely(!admin_queue->running_state)) {
296 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
297 		return ERR_PTR(-ENODEV);
298 	}
299 	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
300 					      cmd_size_in_bytes,
301 					      comp,
302 					      comp_size_in_bytes);
303 	if (IS_ERR(comp_ctx))
304 		admin_queue->running_state = false;
305 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
306 
307 	return comp_ctx;
308 }
309 
310 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
311 			      struct ena_com_create_io_ctx *ctx,
312 			      struct ena_com_io_sq *io_sq)
313 {
314 	size_t size;
315 
316 	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
317 
318 	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
319 	io_sq->desc_entry_size =
320 		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
321 		sizeof(struct ena_eth_io_tx_desc) :
322 		sizeof(struct ena_eth_io_rx_desc);
323 
324 	size = io_sq->desc_entry_size * io_sq->q_depth;
325 
326 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
327 		io_sq->desc_addr.virt_addr =
328 			dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
329 					   GFP_KERNEL);
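		/* The descriptor ring allocation is simply retried once before
		 * giving up.
		 */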
330 		if (!io_sq->desc_addr.virt_addr) {
331 			io_sq->desc_addr.virt_addr =
332 				dma_alloc_coherent(ena_dev->dmadev, size,
333 						   &io_sq->desc_addr.phys_addr, GFP_KERNEL);
334 		}
335 
336 		if (!io_sq->desc_addr.virt_addr) {
337 			netdev_err(ena_dev->net_device, "Memory allocation failed\n");
338 			return -ENOMEM;
339 		}
340 	}
341 
342 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
343 		/* Allocate bounce buffers */
344 		io_sq->bounce_buf_ctrl.buffer_size =
345 			ena_dev->llq_info.desc_list_entry_size;
346 		io_sq->bounce_buf_ctrl.buffers_num =
347 			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
348 		io_sq->bounce_buf_ctrl.next_to_use = 0;
349 
350 		size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
351 			io_sq->bounce_buf_ctrl.buffers_num;
352 
353 		io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
354 		if (!io_sq->bounce_buf_ctrl.base_buffer)
355 			io_sq->bounce_buf_ctrl.base_buffer =
356 				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
357 
358 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
359 			netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
360 			return -ENOMEM;
361 		}
362 
363 		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
364 		       sizeof(io_sq->llq_info));
365 
366 		/* Initiate the first bounce buffer */
367 		io_sq->llq_buf_ctrl.curr_bounce_buf =
368 			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
369 		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
370 		       0x0, io_sq->llq_info.desc_list_entry_size);
371 		io_sq->llq_buf_ctrl.descs_left_in_line =
372 			io_sq->llq_info.descs_num_before_header;
373 		io_sq->disable_meta_caching =
374 			io_sq->llq_info.disable_meta_caching;
375 
376 		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
377 			io_sq->entries_in_tx_burst_left =
378 				io_sq->llq_info.max_entries_in_tx_burst;
379 	}
380 
381 	io_sq->tail = 0;
382 	io_sq->next_to_comp = 0;
383 	io_sq->phase = 1;
384 
385 	return 0;
386 }
387 
388 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
389 			      struct ena_com_create_io_ctx *ctx,
390 			      struct ena_com_io_cq *io_cq)
391 {
392 	size_t size;
393 
394 	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
395 
396 	/* Use the basic completion descriptor for Rx */
397 	io_cq->cdesc_entry_size_in_bytes =
398 		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
399 		sizeof(struct ena_eth_io_tx_cdesc) :
400 		sizeof(struct ena_eth_io_rx_cdesc_base);
401 
402 	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
403 
404 	io_cq->cdesc_addr.virt_addr =
405 		dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
406 	if (!io_cq->cdesc_addr.virt_addr) {
407 		io_cq->cdesc_addr.virt_addr =
408 			dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
409 					   GFP_KERNEL);
410 	}
411 
412 	if (!io_cq->cdesc_addr.virt_addr) {
413 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
414 		return -ENOMEM;
415 	}
416 
417 	io_cq->phase = 1;
418 	io_cq->head = 0;
419 
420 	return 0;
421 }
422 
423 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
424 						   struct ena_admin_acq_entry *cqe)
425 {
426 	struct ena_comp_ctx *comp_ctx;
427 	u16 cmd_id;
428 
429 	cmd_id = cqe->acq_common_descriptor.command &
430 		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
431 
432 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
433 	if (unlikely(!comp_ctx)) {
434 		netdev_err(admin_queue->ena_dev->net_device,
435 			   "comp_ctx is NULL. Changing the admin queue running state\n");
436 		admin_queue->running_state = false;
437 		return;
438 	}
439 
440 	comp_ctx->status = ENA_CMD_COMPLETED;
441 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
442 
443 	if (comp_ctx->user_cqe)
444 		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
445 
446 	if (!admin_queue->polling)
447 		complete(&comp_ctx->wait_event);
448 }
449 
450 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
451 {
452 	struct ena_admin_acq_entry *cqe = NULL;
453 	u16 comp_num = 0;
454 	u16 head_masked;
455 	u8 phase;
456 
457 	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
458 	phase = admin_queue->cq.phase;
459 
460 	cqe = &admin_queue->cq.entries[head_masked];
461 
462 	/* Go over all the completions */
463 	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
464 		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
465 		/* Do not read the rest of the completion entry before the
466 		 * phase bit was validated
467 		 */
468 		dma_rmb();
469 		ena_com_handle_single_admin_completion(admin_queue, cqe);
470 
471 		head_masked++;
472 		comp_num++;
473 		if (unlikely(head_masked == admin_queue->q_depth)) {
474 			head_masked = 0;
475 			phase = !phase;
476 		}
477 
478 		cqe = &admin_queue->cq.entries[head_masked];
479 	}
480 
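	/* Each admin completion frees one submission queue entry, so both the
	 * CQ head and the SQ head advance by the number of processed completions.
	 */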
481 	admin_queue->cq.head += comp_num;
482 	admin_queue->cq.phase = phase;
483 	admin_queue->sq.head += comp_num;
484 	admin_queue->stats.completed_cmd += comp_num;
485 }
486 
487 static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
488 					u8 comp_status)
489 {
490 	if (unlikely(comp_status != 0))
491 		netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
492 			   comp_status);
493 
494 	switch (comp_status) {
495 	case ENA_ADMIN_SUCCESS:
496 		return 0;
497 	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
498 		return -ENOMEM;
499 	case ENA_ADMIN_UNSUPPORTED_OPCODE:
500 		return -EOPNOTSUPP;
501 	case ENA_ADMIN_BAD_OPCODE:
502 	case ENA_ADMIN_MALFORMED_REQUEST:
503 	case ENA_ADMIN_ILLEGAL_PARAMETER:
504 	case ENA_ADMIN_UNKNOWN_ERROR:
505 		return -EINVAL;
506 	case ENA_ADMIN_RESOURCE_BUSY:
507 		return -EAGAIN;
508 	}
509 
510 	return -EINVAL;
511 }
512 
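/* Exponential backoff helper for admin queue polling: the base delay is
 * floored at ENA_MIN_ADMIN_POLL_US, scaled by 2^exp (with exp capped at
 * ENA_MAX_BACKOFF_DELAY_EXP) and capped at ENA_MAX_ADMIN_POLL_US before
 * sleeping.
 */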
513 static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
514 {
515 	exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
516 	delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
517 	delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
518 	usleep_range(delay_us, 2 * delay_us);
519 }
520 
521 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
522 						     struct ena_com_admin_queue *admin_queue)
523 {
524 	unsigned long flags = 0;
525 	unsigned long timeout;
526 	int ret;
527 	u32 exp = 0;
528 
529 	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
530 
531 	while (1) {
532 		spin_lock_irqsave(&admin_queue->q_lock, flags);
533 		ena_com_handle_admin_completion(admin_queue);
534 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
535 
536 		if (comp_ctx->status != ENA_CMD_SUBMITTED)
537 			break;
538 
539 		if (time_is_before_jiffies(timeout)) {
540 			netdev_err(admin_queue->ena_dev->net_device,
541 				   "Wait for completion (polling) timeout\n");
542 			/* ENA didn't have any completion */
543 			spin_lock_irqsave(&admin_queue->q_lock, flags);
544 			admin_queue->stats.no_completion++;
545 			admin_queue->running_state = false;
546 			spin_unlock_irqrestore(&admin_queue->q_lock, flags);
547 
548 			ret = -ETIME;
549 			goto err;
550 		}
551 
552 		ena_delay_exponential_backoff_us(exp++,
553 						 admin_queue->ena_dev->ena_min_poll_delay_us);
554 	}
555 
556 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
557 		netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
558 		spin_lock_irqsave(&admin_queue->q_lock, flags);
559 		admin_queue->stats.aborted_cmd++;
560 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
561 		ret = -ENODEV;
562 		goto err;
563 	}
564 
565 	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
566 
567 	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
568 err:
569 	comp_ctxt_release(admin_queue, comp_ctx);
570 	return ret;
571 }
572 
573 /*
574  * Set the LLQ configurations of the firmware
575  *
576  * The driver provides only the enabled feature values to the device,
577  * which, in turn, checks if they are supported.
578  */
579 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
580 {
581 	struct ena_com_admin_queue *admin_queue;
582 	struct ena_admin_set_feat_cmd cmd;
583 	struct ena_admin_set_feat_resp resp;
584 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
585 	int ret;
586 
587 	memset(&cmd, 0x0, sizeof(cmd));
588 	admin_queue = &ena_dev->admin_queue;
589 
590 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
591 	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
592 
593 	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
594 	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
595 	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
596 	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
597 
598 	cmd.u.llq.accel_mode.u.set.enabled_flags =
599 		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
600 		BIT(ENA_ADMIN_LIMIT_TX_BURST);
601 
602 	ret = ena_com_execute_admin_command(admin_queue,
603 					    (struct ena_admin_aq_entry *)&cmd,
604 					    sizeof(cmd),
605 					    (struct ena_admin_acq_entry *)&resp,
606 					    sizeof(resp));
607 
608 	if (unlikely(ret))
609 		netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
610 
611 	return ret;
612 }
613 
614 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
615 				   struct ena_admin_feature_llq_desc *llq_features,
616 				   struct ena_llq_configurations *llq_default_cfg)
617 {
618 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
619 	struct ena_admin_accel_mode_get llq_accel_mode_get;
620 	u16 supported_feat;
621 	int rc;
622 
623 	memset(llq_info, 0, sizeof(*llq_info));
624 
625 	supported_feat = llq_features->header_location_ctrl_supported;
626 
627 	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
628 		llq_info->header_location_ctrl =
629 			llq_default_cfg->llq_header_location;
630 	} else {
631 		netdev_err(ena_dev->net_device,
632 			   "Invalid header location control, supported: 0x%x\n", supported_feat);
633 		return -EINVAL;
634 	}
635 
636 	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
637 		supported_feat = llq_features->descriptors_stride_ctrl_supported;
638 		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
639 			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
640 		} else	{
641 			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
642 				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
643 			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
644 				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
645 			} else {
646 				netdev_err(ena_dev->net_device,
647 					   "Invalid desc_stride_ctrl, supported: 0x%x\n",
648 					   supported_feat);
649 				return -EINVAL;
650 			}
651 
652 			netdev_err(ena_dev->net_device,
653 				   "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
654 				   llq_default_cfg->llq_stride_ctrl, supported_feat,
655 				   llq_info->desc_stride_ctrl);
656 		}
657 	} else {
658 		llq_info->desc_stride_ctrl = 0;
659 	}
660 
661 	supported_feat = llq_features->entry_size_ctrl_supported;
662 	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
663 		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
664 		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
665 	} else {
666 		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
667 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
668 			llq_info->desc_list_entry_size = 128;
669 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
670 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
671 			llq_info->desc_list_entry_size = 192;
672 		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
673 			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
674 			llq_info->desc_list_entry_size = 256;
675 		} else {
676 			netdev_err(ena_dev->net_device,
677 				   "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
678 			return -EINVAL;
679 		}
680 
681 		netdev_err(ena_dev->net_device,
682 			   "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
683 			   llq_default_cfg->llq_ring_entry_size, supported_feat,
684 			   llq_info->desc_list_entry_size);
685 	}
686 	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
687 		/* The desc list entry size should be a whole multiple of 8.
688 		 * This requirement comes from __iowrite64_copy()
689 		 */
690 		netdev_err(ena_dev->net_device, "Illegal entry size %d\n",
691 			   llq_info->desc_list_entry_size);
692 		return -EINVAL;
693 	}
694 
695 	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
696 		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
697 			sizeof(struct ena_eth_io_tx_desc);
698 	else
699 		llq_info->descs_per_entry = 1;
700 
701 	supported_feat = llq_features->desc_num_before_header_supported;
702 	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
703 		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
704 	} else {
705 		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
706 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
707 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
708 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
709 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
710 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
711 		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
712 			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
713 		} else {
714 			netdev_err(ena_dev->net_device,
715 				   "Invalid descs_num_before_header, supported: 0x%x\n",
716 				   supported_feat);
717 			return -EINVAL;
718 		}
719 
720 		netdev_err(ena_dev->net_device,
721 			   "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
722 			   llq_default_cfg->llq_num_decs_before_header, supported_feat,
723 			   llq_info->descs_num_before_header);
724 	}
725 	/* Check for accelerated queue supported */
726 	llq_accel_mode_get = llq_features->accel_mode.u.get;
727 
728 	llq_info->disable_meta_caching =
729 		!!(llq_accel_mode_get.supported_flags &
730 		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
731 
732 	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
733 		llq_info->max_entries_in_tx_burst =
734 			llq_accel_mode_get.max_tx_burst_size /
735 			llq_default_cfg->llq_ring_entry_size_value;
736 
737 	rc = ena_com_set_llq(ena_dev);
738 	if (rc)
739 		netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
740 
741 	return rc;
742 }
743 
744 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
745 							struct ena_com_admin_queue *admin_queue)
746 {
747 	unsigned long flags = 0;
748 	int ret;
749 
750 	wait_for_completion_timeout(&comp_ctx->wait_event,
751 				    usecs_to_jiffies(admin_queue->completion_timeout));
752 
753 	/* In case the command wasn't completed, find out the root cause.
754 	 * There might be 2 kinds of errors:
755 	 * 1) No completion (timeout reached)
756 	 * 2) There is a completion but the driver didn't get any msi-x interrupt.
757 	 */
758 	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
759 		spin_lock_irqsave(&admin_queue->q_lock, flags);
760 		ena_com_handle_admin_completion(admin_queue);
761 		admin_queue->stats.no_completion++;
762 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
763 
764 		if (comp_ctx->status == ENA_CMD_COMPLETED) {
765 			netdev_err(admin_queue->ena_dev->net_device,
766 				   "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
767 				   comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
768 			/* Check if fallback to polling is enabled */
769 			if (admin_queue->auto_polling)
770 				admin_queue->polling = true;
771 		} else {
772 			netdev_err(admin_queue->ena_dev->net_device,
773 				   "The ena device didn't send a completion for the admin cmd %d status %d\n",
774 				   comp_ctx->cmd_opcode, comp_ctx->status);
775 		}
776 		/* Check if shifted to polling mode.
777 		 * This will happen if there is a completion without an interrupt
778 		 * and autopolling mode is enabled. Continuing normal execution in such case
779 		 */
780 		if (!admin_queue->polling) {
781 			admin_queue->running_state = false;
782 			ret = -ETIME;
783 			goto err;
784 		}
785 	}
786 
787 	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
788 err:
789 	comp_ctxt_release(admin_queue, comp_ctx);
790 	return ret;
791 }
792 
793 /* This method reads the hardware device register through posting writes
794  * and waiting for response
795  * On timeout the function will return ENA_MMIO_READ_TIMEOUT
796  */
797 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
798 {
799 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
800 	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
801 		mmio_read->read_resp;
802 	u32 mmio_read_reg, ret, i;
803 	unsigned long flags = 0;
804 	u32 timeout = mmio_read->reg_read_to;
805 
806 	might_sleep();
807 
808 	if (timeout == 0)
809 		timeout = ENA_REG_READ_TIMEOUT;
810 
811 	/* If readless is disabled, perform regular read */
812 	if (!mmio_read->readless_supported)
813 		return readl(ena_dev->reg_bar + offset);
814 
815 	spin_lock_irqsave(&mmio_read->lock, flags);
816 	mmio_read->seq_num++;
817 
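	/* Pre-set the response req_id to a value that cannot match the new
	 * sequence number, so a later match proves the device actually wrote
	 * the response.
	 */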
818 	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
819 	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
820 			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
821 	mmio_read_reg |= mmio_read->seq_num &
822 			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
823 
824 	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
825 
826 	for (i = 0; i < timeout; i++) {
827 		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
828 			break;
829 
830 		udelay(1);
831 	}
832 
833 	if (unlikely(i == timeout)) {
834 		netdev_err(ena_dev->net_device,
835 			   "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
836 			   mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
837 		ret = ENA_MMIO_READ_TIMEOUT;
838 		goto err;
839 	}
840 
841 	if (read_resp->reg_off != offset) {
842 		netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
843 		ret = ENA_MMIO_READ_TIMEOUT;
844 	} else {
845 		ret = read_resp->reg_val;
846 	}
847 err:
848 	spin_unlock_irqrestore(&mmio_read->lock, flags);
849 
850 	return ret;
851 }
852 
853 /* There are two ways to wait for completion.
854  * Polling mode - wait until the completion is available.
855  * Async mode - wait on wait queue until the completion is ready
856  * (or the timeout expired).
857  * It is expected that the IRQ handler calls ena_com_handle_admin_completion
858  * to mark the completions.
859  */
860 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
861 					     struct ena_com_admin_queue *admin_queue)
862 {
863 	if (admin_queue->polling)
864 		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
865 								 admin_queue);
866 
867 	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
868 							    admin_queue);
869 }
870 
871 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
872 				 struct ena_com_io_sq *io_sq)
873 {
874 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
875 	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
876 	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
877 	u8 direction;
878 	int ret;
879 
880 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
881 
882 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
883 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
884 	else
885 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
886 
887 	destroy_cmd.sq.sq_identity |= (direction <<
888 		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
889 		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
890 
891 	destroy_cmd.sq.sq_idx = io_sq->idx;
892 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
893 
894 	ret = ena_com_execute_admin_command(admin_queue,
895 					    (struct ena_admin_aq_entry *)&destroy_cmd,
896 					    sizeof(destroy_cmd),
897 					    (struct ena_admin_acq_entry *)&destroy_resp,
898 					    sizeof(destroy_resp));
899 
900 	if (unlikely(ret && (ret != -ENODEV)))
901 		netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
902 
903 	return ret;
904 }
905 
906 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
907 				  struct ena_com_io_sq *io_sq,
908 				  struct ena_com_io_cq *io_cq)
909 {
910 	size_t size;
911 
912 	if (io_cq->cdesc_addr.virt_addr) {
913 		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
914 
915 		dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
916 				  io_cq->cdesc_addr.phys_addr);
917 
918 		io_cq->cdesc_addr.virt_addr = NULL;
919 	}
920 
921 	if (io_sq->desc_addr.virt_addr) {
922 		size = io_sq->desc_entry_size * io_sq->q_depth;
923 
924 		dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
925 				  io_sq->desc_addr.phys_addr);
926 
927 		io_sq->desc_addr.virt_addr = NULL;
928 	}
929 
930 	if (io_sq->bounce_buf_ctrl.base_buffer) {
931 		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
932 		io_sq->bounce_buf_ctrl.base_buffer = NULL;
933 	}
934 }
935 
936 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
937 				u16 exp_state)
938 {
939 	u32 val, exp = 0;
940 	unsigned long timeout_stamp;
941 
942 	/* Convert timeout from resolution of 100ms to us resolution. */
943 	timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);
944 
945 	while (1) {
946 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
947 
948 		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
949 			netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
950 			return -ETIME;
951 		}
952 
953 		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
954 			exp_state)
955 			return 0;
956 
957 		if (time_is_before_jiffies(timeout_stamp))
958 			return -ETIME;
959 
960 		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
961 	}
962 }
963 
964 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
965 					       enum ena_admin_aq_feature_id feature_id)
966 {
967 	u32 feature_mask = 1 << feature_id;
968 
969 	/* Device attributes are always supported */
970 	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
971 	    !(ena_dev->supported_features & feature_mask))
972 		return false;
973 
974 	return true;
975 }
976 
977 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
978 				  struct ena_admin_get_feat_resp *get_resp,
979 				  enum ena_admin_aq_feature_id feature_id,
980 				  dma_addr_t control_buf_dma_addr,
981 				  u32 control_buff_size,
982 				  u8 feature_ver)
983 {
984 	struct ena_com_admin_queue *admin_queue;
985 	struct ena_admin_get_feat_cmd get_cmd;
986 	int ret;
987 
988 	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
989 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
990 		return -EOPNOTSUPP;
991 	}
992 
993 	memset(&get_cmd, 0x0, sizeof(get_cmd));
994 	admin_queue = &ena_dev->admin_queue;
995 
996 	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
997 
998 	if (control_buff_size)
999 		get_cmd.aq_common_descriptor.flags =
1000 			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1001 	else
1002 		get_cmd.aq_common_descriptor.flags = 0;
1003 
1004 	ret = ena_com_mem_addr_set(ena_dev,
1005 				   &get_cmd.control_buffer.address,
1006 				   control_buf_dma_addr);
1007 	if (unlikely(ret)) {
1008 		netdev_err(ena_dev->net_device, "Memory address set failed\n");
1009 		return ret;
1010 	}
1011 
1012 	get_cmd.control_buffer.length = control_buff_size;
1013 	get_cmd.feat_common.feature_version = feature_ver;
1014 	get_cmd.feat_common.feature_id = feature_id;
1015 
1016 	ret = ena_com_execute_admin_command(admin_queue,
1017 					    (struct ena_admin_aq_entry *)
1018 					    &get_cmd,
1019 					    sizeof(get_cmd),
1020 					    (struct ena_admin_acq_entry *)
1021 					    get_resp,
1022 					    sizeof(*get_resp));
1023 
1024 	if (unlikely(ret))
1025 		netdev_err(ena_dev->net_device,
1026 			   "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
1027 
1028 	return ret;
1029 }
1030 
1031 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1032 			       struct ena_admin_get_feat_resp *get_resp,
1033 			       enum ena_admin_aq_feature_id feature_id,
1034 			       u8 feature_ver)
1035 {
1036 	return ena_com_get_feature_ex(ena_dev,
1037 				      get_resp,
1038 				      feature_id,
1039 				      0,
1040 				      0,
1041 				      feature_ver);
1042 }
1043 
1044 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1045 {
1046 	return ena_dev->rss.hash_func;
1047 }
1048 
1049 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1050 {
1051 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
1052 		(ena_dev->rss).hash_key;
1053 
1054 	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
1055 	/* The key buffer is stored in the device in an array of
1056 	 * uint32 elements.
1057 	 */
1058 	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1059 }
1060 
1061 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1062 {
1063 	struct ena_rss *rss = &ena_dev->rss;
1064 
1065 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
1066 		return -EOPNOTSUPP;
1067 
1068 	rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1069 					   &rss->hash_key_dma_addr, GFP_KERNEL);
1070 
1071 	if (unlikely(!rss->hash_key))
1072 		return -ENOMEM;
1073 
1074 	return 0;
1075 }
1076 
1077 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1078 {
1079 	struct ena_rss *rss = &ena_dev->rss;
1080 
1081 	if (rss->hash_key)
1082 		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
1083 				  rss->hash_key_dma_addr);
1084 	rss->hash_key = NULL;
1085 }
1086 
1087 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1088 {
1089 	struct ena_rss *rss = &ena_dev->rss;
1090 
1091 	rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1092 					    &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1093 
1094 	if (unlikely(!rss->hash_ctrl))
1095 		return -ENOMEM;
1096 
1097 	return 0;
1098 }
1099 
1100 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1101 {
1102 	struct ena_rss *rss = &ena_dev->rss;
1103 
1104 	if (rss->hash_ctrl)
1105 		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
1106 				  rss->hash_ctrl_dma_addr);
1107 	rss->hash_ctrl = NULL;
1108 }
1109 
1110 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1111 					   u16 log_size)
1112 {
1113 	struct ena_rss *rss = &ena_dev->rss;
1114 	struct ena_admin_get_feat_resp get_resp;
1115 	size_t tbl_size;
1116 	int ret;
1117 
1118 	ret = ena_com_get_feature(ena_dev, &get_resp,
1119 				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
1120 	if (unlikely(ret))
1121 		return ret;
1122 
1123 	if ((get_resp.u.ind_table.min_size > log_size) ||
1124 	    (get_resp.u.ind_table.max_size < log_size)) {
1125 		netdev_err(ena_dev->net_device,
1126 			   "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1127 			   1 << log_size, 1 << get_resp.u.ind_table.min_size,
1128 			   1 << get_resp.u.ind_table.max_size);
1129 		return -EINVAL;
1130 	}
1131 
1132 	tbl_size = (1ULL << log_size) *
1133 		sizeof(struct ena_admin_rss_ind_table_entry);
1134 
1135 	rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
1136 					      GFP_KERNEL);
1137 	if (unlikely(!rss->rss_ind_tbl))
1138 		goto mem_err1;
1139 
1140 	tbl_size = (1ULL << log_size) * sizeof(u16);
1141 	rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1142 	if (unlikely(!rss->host_rss_ind_tbl))
1143 		goto mem_err2;
1144 
1145 	rss->tbl_log_size = log_size;
1146 
1147 	return 0;
1148 
1149 mem_err2:
1150 	tbl_size = (1ULL << log_size) *
1151 		sizeof(struct ena_admin_rss_ind_table_entry);
1152 
1153 	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
1154 	rss->rss_ind_tbl = NULL;
1155 mem_err1:
1156 	rss->tbl_log_size = 0;
1157 	return -ENOMEM;
1158 }
1159 
1160 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1161 {
1162 	struct ena_rss *rss = &ena_dev->rss;
1163 	size_t tbl_size = (1ULL << rss->tbl_log_size) *
1164 		sizeof(struct ena_admin_rss_ind_table_entry);
1165 
1166 	if (rss->rss_ind_tbl)
1167 		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1168 				  rss->rss_ind_tbl_dma_addr);
1169 	rss->rss_ind_tbl = NULL;
1170 
1171 	if (rss->host_rss_ind_tbl)
1172 		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1173 	rss->host_rss_ind_tbl = NULL;
1174 }
1175 
1176 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1177 				struct ena_com_io_sq *io_sq, u16 cq_idx)
1178 {
1179 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1180 	struct ena_admin_aq_create_sq_cmd create_cmd;
1181 	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1182 	u8 direction;
1183 	int ret;
1184 
1185 	memset(&create_cmd, 0x0, sizeof(create_cmd));
1186 
1187 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1188 
1189 	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1190 		direction = ENA_ADMIN_SQ_DIRECTION_TX;
1191 	else
1192 		direction = ENA_ADMIN_SQ_DIRECTION_RX;
1193 
1194 	create_cmd.sq_identity |= (direction <<
1195 		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1196 		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1197 
1198 	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1199 		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1200 
1201 	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1202 		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1203 		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1204 
1205 	create_cmd.sq_caps_3 |=
1206 		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1207 
1208 	create_cmd.cq_idx = cq_idx;
1209 	create_cmd.sq_depth = io_sq->q_depth;
1210 
1211 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1212 		ret = ena_com_mem_addr_set(ena_dev,
1213 					   &create_cmd.sq_ba,
1214 					   io_sq->desc_addr.phys_addr);
1215 		if (unlikely(ret)) {
1216 			netdev_err(ena_dev->net_device, "Memory address set failed\n");
1217 			return ret;
1218 		}
1219 	}
1220 
1221 	ret = ena_com_execute_admin_command(admin_queue,
1222 					    (struct ena_admin_aq_entry *)&create_cmd,
1223 					    sizeof(create_cmd),
1224 					    (struct ena_admin_acq_entry *)&cmd_completion,
1225 					    sizeof(cmd_completion));
1226 	if (unlikely(ret)) {
1227 		netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
1228 		return ret;
1229 	}
1230 
1231 	io_sq->idx = cmd_completion.sq_idx;
1232 
1233 	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1234 		(uintptr_t)cmd_completion.sq_doorbell_offset);
1235 
1236 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1237 		io_sq->desc_addr.pbuf_dev_addr =
1238 			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1239 			cmd_completion.llq_descriptors_offset);
1240 	}
1241 
1242 	netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1243 
1244 	return ret;
1245 }
1246 
1247 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1248 {
1249 	struct ena_rss *rss = &ena_dev->rss;
1250 	struct ena_com_io_sq *io_sq;
1251 	u16 qid;
1252 	int i;
1253 
1254 	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1255 		qid = rss->host_rss_ind_tbl[i];
1256 		if (qid >= ENA_TOTAL_NUM_QUEUES)
1257 			return -EINVAL;
1258 
1259 		io_sq = &ena_dev->io_sq_queues[qid];
1260 
1261 		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1262 			return -EINVAL;
1263 
1264 		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1265 	}
1266 
1267 	return 0;
1268 }
1269 
1270 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1271 						 u16 intr_delay_resolution)
1272 {
1273 	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1274 
1275 	if (unlikely(!intr_delay_resolution)) {
1276 		netdev_err(ena_dev->net_device,
1277 			   "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1278 		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1279 	}
1280 
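	/* Moderation intervals are cached in units of the previous resolution;
	 * rescale them so their absolute time stays the same.
	 */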
1281 	/* update Rx */
1282 	ena_dev->intr_moder_rx_interval =
1283 		ena_dev->intr_moder_rx_interval *
1284 		prev_intr_delay_resolution /
1285 		intr_delay_resolution;
1286 
1287 	/* update Tx */
1288 	ena_dev->intr_moder_tx_interval =
1289 		ena_dev->intr_moder_tx_interval *
1290 		prev_intr_delay_resolution /
1291 		intr_delay_resolution;
1292 
1293 	ena_dev->intr_delay_resolution = intr_delay_resolution;
1294 }
1295 
1296 /*****************************************************************************/
1297 /*******************************      API       ******************************/
1298 /*****************************************************************************/
1299 
1300 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1301 				  struct ena_admin_aq_entry *cmd,
1302 				  size_t cmd_size,
1303 				  struct ena_admin_acq_entry *comp,
1304 				  size_t comp_size)
1305 {
1306 	struct ena_comp_ctx *comp_ctx;
1307 	int ret;
1308 
1309 	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1310 					    comp, comp_size);
1311 	if (IS_ERR(comp_ctx)) {
1312 		ret = PTR_ERR(comp_ctx);
1313 		if (ret == -ENODEV)
1314 			netdev_dbg(admin_queue->ena_dev->net_device,
1315 				   "Failed to submit command [%d]\n", ret);
1316 		else
1317 			netdev_err(admin_queue->ena_dev->net_device,
1318 				   "Failed to submit command [%d]\n", ret);
1319 
1320 		return ret;
1321 	}
1322 
1323 	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1324 	if (unlikely(ret)) {
1325 		if (admin_queue->running_state)
1326 			netdev_err(admin_queue->ena_dev->net_device,
1327 				   "Failed to process command. ret = %d\n", ret);
1328 		else
1329 			netdev_dbg(admin_queue->ena_dev->net_device,
1330 				   "Failed to process command. ret = %d\n", ret);
1331 	}
1332 	return ret;
1333 }
1334 
1335 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1336 			 struct ena_com_io_cq *io_cq)
1337 {
1338 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1339 	struct ena_admin_aq_create_cq_cmd create_cmd;
1340 	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1341 	int ret;
1342 
1343 	memset(&create_cmd, 0x0, sizeof(create_cmd));
1344 
1345 	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1346 
1347 	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1348 		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1349 	create_cmd.cq_caps_1 |=
1350 		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1351 
1352 	create_cmd.msix_vector = io_cq->msix_vector;
1353 	create_cmd.cq_depth = io_cq->q_depth;
1354 
1355 	ret = ena_com_mem_addr_set(ena_dev,
1356 				   &create_cmd.cq_ba,
1357 				   io_cq->cdesc_addr.phys_addr);
1358 	if (unlikely(ret)) {
1359 		netdev_err(ena_dev->net_device, "Memory address set failed\n");
1360 		return ret;
1361 	}
1362 
1363 	ret = ena_com_execute_admin_command(admin_queue,
1364 					    (struct ena_admin_aq_entry *)&create_cmd,
1365 					    sizeof(create_cmd),
1366 					    (struct ena_admin_acq_entry *)&cmd_completion,
1367 					    sizeof(cmd_completion));
1368 	if (unlikely(ret)) {
1369 		netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
1370 		return ret;
1371 	}
1372 
1373 	io_cq->idx = cmd_completion.cq_idx;
1374 
1375 	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1376 		cmd_completion.cq_interrupt_unmask_register_offset);
1377 
1378 	if (cmd_completion.numa_node_register_offset)
1379 		io_cq->numa_node_cfg_reg =
1380 			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1381 			cmd_completion.numa_node_register_offset);
1382 
1383 	netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1384 
1385 	return ret;
1386 }
1387 
1388 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1389 			    struct ena_com_io_sq **io_sq,
1390 			    struct ena_com_io_cq **io_cq)
1391 {
1392 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1393 		netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
1394 			   ENA_TOTAL_NUM_QUEUES);
1395 		return -EINVAL;
1396 	}
1397 
1398 	*io_sq = &ena_dev->io_sq_queues[qid];
1399 	*io_cq = &ena_dev->io_cq_queues[qid];
1400 
1401 	return 0;
1402 }
1403 
1404 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1405 {
1406 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1407 	struct ena_comp_ctx *comp_ctx;
1408 	u16 i;
1409 
1410 	if (!admin_queue->comp_ctx)
1411 		return;
1412 
1413 	for (i = 0; i < admin_queue->q_depth; i++) {
1414 		comp_ctx = get_comp_ctxt(admin_queue, i, false);
1415 		if (unlikely(!comp_ctx))
1416 			break;
1417 
1418 		comp_ctx->status = ENA_CMD_ABORTED;
1419 
1420 		complete(&comp_ctx->wait_event);
1421 	}
1422 }
1423 
1424 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1425 {
1426 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1427 	unsigned long flags = 0;
1428 	u32 exp = 0;
1429 
1430 	spin_lock_irqsave(&admin_queue->q_lock, flags);
1431 	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1432 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1433 		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1434 		spin_lock_irqsave(&admin_queue->q_lock, flags);
1435 	}
1436 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1437 }
1438 
1439 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1440 			  struct ena_com_io_cq *io_cq)
1441 {
1442 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1443 	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1444 	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1445 	int ret;
1446 
1447 	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1448 
1449 	destroy_cmd.cq_idx = io_cq->idx;
1450 	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1451 
1452 	ret = ena_com_execute_admin_command(admin_queue,
1453 					    (struct ena_admin_aq_entry *)&destroy_cmd,
1454 					    sizeof(destroy_cmd),
1455 					    (struct ena_admin_acq_entry *)&destroy_resp,
1456 					    sizeof(destroy_resp));
1457 
1458 	if (unlikely(ret && (ret != -ENODEV)))
1459 		netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
1460 
1461 	return ret;
1462 }
1463 
1464 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1465 {
1466 	return ena_dev->admin_queue.running_state;
1467 }
1468 
1469 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1470 {
1471 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1472 	unsigned long flags = 0;
1473 
1474 	spin_lock_irqsave(&admin_queue->q_lock, flags);
1475 	ena_dev->admin_queue.running_state = state;
1476 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1477 }
1478 
1479 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1480 {
1481 	u16 depth = ena_dev->aenq.q_depth;
1482 
1483 	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1484 
1485 	/* Init head_db to mark that all entries in the queue
1486 	 * are initially available
1487 	 */
1488 	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1489 }
1490 
1491 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1492 {
1493 	struct ena_com_admin_queue *admin_queue;
1494 	struct ena_admin_set_feat_cmd cmd;
1495 	struct ena_admin_set_feat_resp resp;
1496 	struct ena_admin_get_feat_resp get_resp;
1497 	int ret;
1498 
1499 	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1500 	if (ret) {
1501 		dev_info(ena_dev->dmadev, "Can't get aenq configuration\n");
1502 		return ret;
1503 	}
1504 
1505 	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1506 		netdev_warn(ena_dev->net_device,
1507 			    "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1508 			    get_resp.u.aenq.supported_groups, groups_flag);
1509 		return -EOPNOTSUPP;
1510 	}
1511 
1512 	memset(&cmd, 0x0, sizeof(cmd));
1513 	admin_queue = &ena_dev->admin_queue;
1514 
1515 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1516 	cmd.aq_common_descriptor.flags = 0;
1517 	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1518 	cmd.u.aenq.enabled_groups = groups_flag;
1519 
1520 	ret = ena_com_execute_admin_command(admin_queue,
1521 					    (struct ena_admin_aq_entry *)&cmd,
1522 					    sizeof(cmd),
1523 					    (struct ena_admin_acq_entry *)&resp,
1524 					    sizeof(resp));
1525 
1526 	if (unlikely(ret))
1527 		netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
1528 
1529 	return ret;
1530 }
1531 
1532 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1533 {
1534 	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1535 	u32 width;
1536 
1537 	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1538 		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1539 		return -ETIME;
1540 	}
1541 
1542 	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1543 		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1544 
1545 	netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
1546 
1547 	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
1548 		netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
1549 		return -EINVAL;
1550 	}
1551 
1552 	ena_dev->dma_addr_bits = width;
1553 
1554 	return width;
1555 }
1556 
1557 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1558 {
1559 	u32 ver;
1560 	u32 ctrl_ver;
1561 	u32 ctrl_ver_masked;
1562 
1563 	/* Make sure the ENA version and the controller version are at least
1564 	 * as high as the versions the driver expects
1565 	 */
1566 	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1567 	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1568 					  ENA_REGS_CONTROLLER_VERSION_OFF);
1569 
1570 	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1571 		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1572 		return -ETIME;
1573 	}
1574 
1575 	dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
1576 		 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1577 		 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1578 
1579 	dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
1580 		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1581 			 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1582 		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1583 			 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1584 		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1585 		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1586 			 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1587 
1588 	ctrl_ver_masked =
1589 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1590 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1591 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1592 
1593 	/* Validate the ctrl version without the implementation ID */
1594 	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1595 		netdev_err(ena_dev->net_device,
1596 			   "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1597 		return -1;
1598 	}
1599 
1600 	return 0;
1601 }
1602 
1603 static void
1604 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1605 				      struct ena_com_admin_queue *admin_queue)
1606 
1607 {
1608 	if (!admin_queue->comp_ctx)
1609 		return;
1610 
1611 	devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1612 
1613 	admin_queue->comp_ctx = NULL;
1614 }
1615 
1616 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1617 {
1618 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1619 	struct ena_com_admin_cq *cq = &admin_queue->cq;
1620 	struct ena_com_admin_sq *sq = &admin_queue->sq;
1621 	struct ena_com_aenq *aenq = &ena_dev->aenq;
1622 	u16 size;
1623 
1624 	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1625 
1626 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1627 	if (sq->entries)
1628 		dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
1629 	sq->entries = NULL;
1630 
1631 	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1632 	if (cq->entries)
1633 		dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
1634 	cq->entries = NULL;
1635 
1636 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
1637 	if (ena_dev->aenq.entries)
1638 		dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
1639 	aenq->entries = NULL;
1640 }
1641 
1642 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1643 {
1644 	u32 mask_value = 0;
1645 
1646 	if (polling)
1647 		mask_value = ENA_REGS_ADMIN_INTR_MASK;
1648 
1649 	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1650 	ena_dev->admin_queue.polling = polling;
1651 }
1652 
1653 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1654 					 bool polling)
1655 {
1656 	ena_dev->admin_queue.auto_polling = polling;
1657 }
1658 
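/* Set up the "readless" register read mechanism: allocate a DMA-coherent
 * response buffer and program its address into the device, so that register
 * reads can be completed through host memory rather than direct MMIO reads.
 */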
1659 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1660 {
1661 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1662 
1663 	spin_lock_init(&mmio_read->lock);
1664 	mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1665 						  &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1666 	if (unlikely(!mmio_read->read_resp))
1667 		goto err;
1668 
1669 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1670 
1671 	mmio_read->read_resp->req_id = 0x0;
1672 	mmio_read->seq_num = 0x0;
1673 	mmio_read->readless_supported = true;
1674 
1675 	return 0;
1676 
1677 err:
1678 
1679 	return -ENOMEM;
1680 }
1681 
1682 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1683 {
1684 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1685 
1686 	mmio_read->readless_supported = readless_supported;
1687 }
1688 
1689 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1690 {
1691 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1692 
1693 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1694 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1695 
1696 	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
1697 			  mmio_read->read_resp_dma_addr);
1698 
1699 	mmio_read->read_resp = NULL;
1700 }
1701 
1702 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1703 {
1704 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1705 	u32 addr_low, addr_high;
1706 
1707 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1708 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1709 
1710 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1711 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1712 }
1713 
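/* Bring up the admin queue: verify the device is ready, allocate the
 * completion contexts and the admin SQ and CQ, program their base addresses
 * and capability registers, and initialize the AENQ. On any failure the
 * partially initialized resources are released via ena_com_admin_destroy().
 */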
1714 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1715 		       struct ena_aenq_handlers *aenq_handlers)
1716 {
1717 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1718 	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1719 	int ret;
1720 
1721 	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1722 
1723 	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1724 		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
1725 		return -ETIME;
1726 	}
1727 
1728 	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1729 		netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
1730 		return -ENODEV;
1731 	}
1732 
1733 	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1734 
1735 	admin_queue->q_dmadev = ena_dev->dmadev;
1736 	admin_queue->polling = false;
1737 	admin_queue->curr_cmd_id = 0;
1738 
1739 	atomic_set(&admin_queue->outstanding_cmds, 0);
1740 
1741 	spin_lock_init(&admin_queue->q_lock);
1742 
1743 	ret = ena_com_init_comp_ctxt(admin_queue);
1744 	if (ret)
1745 		goto error;
1746 
1747 	ret = ena_com_admin_init_sq(admin_queue);
1748 	if (ret)
1749 		goto error;
1750 
1751 	ret = ena_com_admin_init_cq(admin_queue);
1752 	if (ret)
1753 		goto error;
1754 
1755 	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1756 		ENA_REGS_AQ_DB_OFF);
1757 
1758 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1759 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1760 
1761 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1762 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1763 
1764 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1765 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1766 
1767 	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1768 	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1769 
1770 	aq_caps = 0;
1771 	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1772 	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1773 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1774 			ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1775 
1776 	acq_caps = 0;
1777 	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1778 	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1779 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1780 		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1781 
1782 	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1783 	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1784 	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1785 	if (ret)
1786 		goto error;
1787 
1788 	admin_queue->ena_dev = ena_dev;
1789 	admin_queue->running_state = true;
1790 
1791 	return 0;
1792 error:
1793 	ena_com_admin_destroy(ena_dev);
1794 
1795 	return ret;
1796 }
1797 
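/* Create a single IO queue pair: initialize the host-side SQ/CQ state and
 * descriptors, then issue the admin commands that create the CQ and the SQ
 * on the device. The CQ is destroyed again if SQ creation fails.
 */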
1798 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1799 			    struct ena_com_create_io_ctx *ctx)
1800 {
1801 	struct ena_com_io_sq *io_sq;
1802 	struct ena_com_io_cq *io_cq;
1803 	int ret;
1804 
1805 	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1806 		netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
1807 			   ctx->qid, ENA_TOTAL_NUM_QUEUES);
1808 		return -EINVAL;
1809 	}
1810 
1811 	io_sq = &ena_dev->io_sq_queues[ctx->qid];
1812 	io_cq = &ena_dev->io_cq_queues[ctx->qid];
1813 
1814 	memset(io_sq, 0x0, sizeof(*io_sq));
1815 	memset(io_cq, 0x0, sizeof(*io_cq));
1816 
1817 	/* Init CQ */
1818 	io_cq->q_depth = ctx->queue_size;
1819 	io_cq->direction = ctx->direction;
1820 	io_cq->qid = ctx->qid;
1821 
1822 	io_cq->msix_vector = ctx->msix_vector;
1823 
1824 	io_sq->q_depth = ctx->queue_size;
1825 	io_sq->direction = ctx->direction;
1826 	io_sq->qid = ctx->qid;
1827 
1828 	io_sq->mem_queue_type = ctx->mem_queue_type;
1829 
1830 	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1831 		/* header length is limited to 8 bits */
1832 		io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1833 
1834 	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1835 	if (ret)
1836 		goto error;
1837 	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1838 	if (ret)
1839 		goto error;
1840 
1841 	ret = ena_com_create_io_cq(ena_dev, io_cq);
1842 	if (ret)
1843 		goto error;
1844 
1845 	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1846 	if (ret)
1847 		goto destroy_io_cq;
1848 
1849 	return 0;
1850 
1851 destroy_io_cq:
1852 	ena_com_destroy_io_cq(ena_dev, io_cq);
1853 error:
1854 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1855 	return ret;
1856 }
1857 
1858 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1859 {
1860 	struct ena_com_io_sq *io_sq;
1861 	struct ena_com_io_cq *io_cq;
1862 
1863 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
1864 		netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
1865 			   qid, ENA_TOTAL_NUM_QUEUES);
1866 		return;
1867 	}
1868 
1869 	io_sq = &ena_dev->io_sq_queues[qid];
1870 	io_cq = &ena_dev->io_cq_queues[qid];
1871 
1872 	ena_com_destroy_io_sq(ena_dev, io_sq);
1873 	ena_com_destroy_io_cq(ena_dev, io_cq);
1874 
1875 	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1876 }
1877 
1878 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1879 			    struct ena_admin_get_feat_resp *resp)
1880 {
1881 	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1882 }
1883 
1884 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
1885 			     struct ena_com_stats_ctx *ctx,
1886 			     enum ena_admin_get_stats_type type)
1887 {
1888 	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
1889 	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
1890 	struct ena_com_admin_queue *admin_queue;
1891 	int ret;
1892 
1893 	admin_queue = &ena_dev->admin_queue;
1894 
1895 	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
1896 	get_cmd->aq_common_descriptor.flags = 0;
1897 	get_cmd->type = type;
1898 
1899 	ret = ena_com_execute_admin_command(admin_queue,
1900 					    (struct ena_admin_aq_entry *)get_cmd,
1901 					    sizeof(*get_cmd),
1902 					    (struct ena_admin_acq_entry *)get_resp,
1903 					    sizeof(*get_resp));
1904 
1905 	if (unlikely(ret))
1906 		netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
1907 
1908 	return ret;
1909 }
1910 
1911 static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
1912 {
1913 	struct ena_customer_metrics *customer_metrics;
1914 	struct ena_com_stats_ctx ctx;
1915 	int ret;
1916 
1917 	customer_metrics = &ena_dev->customer_metrics;
1918 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
1919 		customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
1920 		return;
1921 	}
1922 
1923 	memset(&ctx, 0x0, sizeof(ctx));
1924 	ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
1925 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
1926 	if (likely(ret == 0))
1927 		customer_metrics->supported_metrics =
1928 			ctx.get_resp.u.customer_metrics.reported_metrics;
1929 	else
1930 		netdev_err(ena_dev->net_device,
1931 			   "Failed to query customer metrics support. error: %d\n", ret);
1932 }
1933 
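/* Query the device feature set: device attributes, queue limits (extended
 * or legacy format), AENQ configuration and stateless offloads, plus the
 * optional HW hints and LLQ features, and finally record which customer
 * metrics the device supports.
 */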
1934 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1935 			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
1936 {
1937 	struct ena_admin_get_feat_resp get_resp;
1938 	int rc;
1939 
1940 	rc = ena_com_get_feature(ena_dev, &get_resp,
1941 				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1942 	if (rc)
1943 		return rc;
1944 
1945 	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1946 	       sizeof(get_resp.u.dev_attr));
1947 
1948 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1949 	ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
1950 
1951 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1952 		rc = ena_com_get_feature(ena_dev, &get_resp,
1953 					 ENA_ADMIN_MAX_QUEUES_EXT,
1954 					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1955 		if (rc)
1956 			return rc;
1957 
1958 		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1959 			return -EINVAL;
1960 
1961 		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1962 		       sizeof(get_resp.u.max_queue_ext));
1963 		ena_dev->tx_max_header_size =
1964 			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1965 	} else {
1966 		rc = ena_com_get_feature(ena_dev, &get_resp,
1967 					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1968 		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1969 		       sizeof(get_resp.u.max_queue));
1970 		ena_dev->tx_max_header_size =
1971 			get_resp.u.max_queue.max_header_size;
1972 
1973 		if (rc)
1974 			return rc;
1975 	}
1976 
1977 	rc = ena_com_get_feature(ena_dev, &get_resp,
1978 				 ENA_ADMIN_AENQ_CONFIG, 0);
1979 	if (rc)
1980 		return rc;
1981 
1982 	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1983 	       sizeof(get_resp.u.aenq));
1984 
1985 	rc = ena_com_get_feature(ena_dev, &get_resp,
1986 				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1987 	if (rc)
1988 		return rc;
1989 
1990 	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1991 	       sizeof(get_resp.u.offload));
1992 
1993 	/* Driver hints isn't a mandatory admin command, so if the
1994 	 * command isn't supported, set driver hints to 0
1995 	 */
1996 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1997 
1998 	if (!rc)
1999 		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
2000 	else if (rc == -EOPNOTSUPP)
2001 		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
2002 	else
2003 		return rc;
2004 
2005 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2006 	if (!rc)
2007 		memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
2008 	else if (rc == -EOPNOTSUPP)
2009 		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2010 	else
2011 		return rc;
2012 
2013 	ena_com_set_supported_customer_metrics(ena_dev);
2014 
2015 	return 0;
2016 }
2017 
2018 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2019 {
2020 	ena_com_handle_admin_completion(&ena_dev->admin_queue);
2021 }
2022 
2023 /* ena_com_get_specific_aenq_cb:
2024  * return the handler that is relevant to the specific event group
2025  */
2026 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2027 						     u16 group)
2028 {
2029 	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2030 
2031 	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2032 		return aenq_handlers->handlers[group];
2033 
2034 	return aenq_handlers->unimplemented_handler;
2035 }
2036 
2037 /* ena_com_aenq_intr_handler:
2038  * Handles incoming AENQ events: pops events from the queue and invokes
2039  * the handler registered for each event group.
2040  */
2041 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2042 {
2043 	struct ena_admin_aenq_entry *aenq_e;
2044 	struct ena_admin_aenq_common_desc *aenq_common;
2045 	struct ena_com_aenq *aenq  = &ena_dev->aenq;
2046 	u64 timestamp;
2047 	ena_aenq_handler handler_cb;
2048 	u16 masked_head, processed = 0;
2049 	u8 phase;
2050 
2051 	masked_head = aenq->head & (aenq->q_depth - 1);
2052 	phase = aenq->phase;
2053 	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2054 	aenq_common = &aenq_e->aenq_common_desc;
2055 
2056 	/* Go over all the events */
2057 	while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2058 		/* Make sure the phase bit (ownership) is as expected before
2059 		 * reading the rest of the descriptor.
2060 		 */
2061 		dma_rmb();
2062 
2063 		timestamp = (u64)aenq_common->timestamp_low |
2064 			((u64)aenq_common->timestamp_high << 32);
2065 
2066 		netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2067 			   aenq_common->group, aenq_common->syndrome, timestamp);
2068 
2069 		/* Handle specific event */
2070 		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2071 							  aenq_common->group);
2072 		handler_cb(data, aenq_e); /* call the actual event handler */
2073 
2074 		/* Get next event entry */
2075 		masked_head++;
2076 		processed++;
2077 
2078 		if (unlikely(masked_head == aenq->q_depth)) {
2079 			masked_head = 0;
2080 			phase = !phase;
2081 		}
2082 		aenq_e = &aenq->entries[masked_head];
2083 		aenq_common = &aenq_e->aenq_common_desc;
2084 	}
2085 
2086 	aenq->head += processed;
2087 	aenq->phase = phase;
2088 
2089 	/* Don't update aenq doorbell if there weren't any processed events */
2090 	if (!processed)
2091 		return;
2092 
2093 	/* write the aenq doorbell after all AENQ descriptors were read */
2094 	mb();
2095 	writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2096 }
2097 
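/* Reset the device: trigger the reset with the given reason, wait for the
 * reset-in-progress bit to assert and then clear, and derive the admin
 * completion timeout from the capabilities register (falling back to the
 * default when the register reports zero).
 */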
2098 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2099 		      enum ena_regs_reset_reason_types reset_reason)
2100 {
2101 	u32 stat, timeout, cap, reset_val;
2102 	int rc;
2103 
2104 	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2105 	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2106 
2107 	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
2108 		netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
2109 		return -ETIME;
2110 	}
2111 
2112 	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2113 		netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
2114 		return -EINVAL;
2115 	}
2116 
2117 	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2118 			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2119 	if (timeout == 0) {
2120 		netdev_err(ena_dev->net_device, "Invalid timeout value\n");
2121 		return -EINVAL;
2122 	}
2123 
2124 	/* start reset */
2125 	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2126 	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2127 		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2128 	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2129 
2130 	/* Write again the MMIO read request address */
2131 	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2132 
2133 	rc = wait_for_reset_state(ena_dev, timeout,
2134 				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2135 	if (rc != 0) {
2136 		netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
2137 		return rc;
2138 	}
2139 
2140 	/* reset done */
2141 	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2142 	rc = wait_for_reset_state(ena_dev, timeout, 0);
2143 	if (rc != 0) {
2144 		netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
2145 		return rc;
2146 	}
2147 
2148 	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2149 		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2150 	if (timeout)
2151 		/* the resolution of timeout reg is 100ms */
2152 		ena_dev->admin_queue.completion_timeout = timeout * 100000;
2153 	else
2154 		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2155 
2156 	return 0;
2157 }
2158 
2159 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2160 			  struct ena_admin_eni_stats *stats)
2161 {
2162 	struct ena_com_stats_ctx ctx;
2163 	int ret;
2164 
2165 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
2166 		netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
2167 			   ENA_ADMIN_ENI_STATS);
2168 		return -EOPNOTSUPP;
2169 	}
2170 
2171 	memset(&ctx, 0x0, sizeof(ctx));
2172 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2173 	if (likely(ret == 0))
2174 		memcpy(stats, &ctx.get_resp.u.eni_stats,
2175 		       sizeof(ctx.get_resp.u.eni_stats));
2176 
2177 	return ret;
2178 }
2179 
2180 int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
2181 			     struct ena_admin_ena_srd_info *info)
2182 {
2183 	struct ena_com_stats_ctx ctx;
2184 	int ret;
2185 
2186 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
2187 		netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
2188 			   ENA_ADMIN_ENA_SRD_INFO);
2189 		return -EOPNOTSUPP;
2190 	}
2191 
2192 	memset(&ctx, 0x0, sizeof(ctx));
2193 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
2194 	if (likely(ret == 0))
2195 		memcpy(info, &ctx.get_resp.u.ena_srd_info,
2196 		       sizeof(ctx.get_resp.u.ena_srd_info));
2197 
2198 	return ret;
2199 }
2200 
2201 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2202 				struct ena_admin_basic_stats *stats)
2203 {
2204 	struct ena_com_stats_ctx ctx;
2205 	int ret;
2206 
2207 	memset(&ctx, 0x0, sizeof(ctx));
2208 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2209 	if (likely(ret == 0))
2210 		memcpy(stats, &ctx.get_resp.u.basic_stats,
2211 		       sizeof(ctx.get_resp.u.basic_stats));
2212 
2213 	return ret;
2214 }
2215 
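/* Retrieve customer metrics: the device writes the metrics into the
 * pre-allocated DMA buffer referenced by the command's control buffer, and
 * the result is copied into the caller's buffer, which must not exceed the
 * allocated length.
 */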
2216 int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
2217 {
2218 	struct ena_admin_aq_get_stats_cmd *get_cmd;
2219 	struct ena_com_stats_ctx ctx;
2220 	int ret;
2221 
2222 	if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
2223 		netdev_err(ena_dev->net_device,
2224 			   "Invalid buffer size %u. The given buffer is too big.\n", len);
2225 		return -EINVAL;
2226 	}
2227 
2228 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
2229 		netdev_err(ena_dev->net_device, "Capability %d not supported.\n",
2230 			   ENA_ADMIN_CUSTOMER_METRICS);
2231 		return -EOPNOTSUPP;
2232 	}
2233 
2234 	if (!ena_dev->customer_metrics.supported_metrics) {
2235 		netdev_err(ena_dev->net_device, "No supported customer metrics.\n");
2236 		return -EOPNOTSUPP;
2237 	}
2238 
2239 	get_cmd = &ctx.get_cmd;
2240 	memset(&ctx, 0x0, sizeof(ctx));
2241 	ret = ena_com_mem_addr_set(ena_dev,
2242 				   &get_cmd->u.control_buffer.address,
2243 				   ena_dev->customer_metrics.buffer_dma_addr);
2244 	if (unlikely(ret)) {
2245 		netdev_err(ena_dev->net_device, "Memory address set failed.\n");
2246 		return ret;
2247 	}
2248 
2249 	get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
2250 	get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
2251 	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
2252 	if (likely(ret == 0))
2253 		memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
2254 	else
2255 		netdev_err(ena_dev->net_device, "Failed to get customer metrics. error: %d\n", ret);
2256 
2257 	return ret;
2258 }
2259 
2260 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
2261 {
2262 	struct ena_com_admin_queue *admin_queue;
2263 	struct ena_admin_set_feat_cmd cmd;
2264 	struct ena_admin_set_feat_resp resp;
2265 	int ret;
2266 
2267 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2268 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
2269 		return -EOPNOTSUPP;
2270 	}
2271 
2272 	memset(&cmd, 0x0, sizeof(cmd));
2273 	admin_queue = &ena_dev->admin_queue;
2274 
2275 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2276 	cmd.aq_common_descriptor.flags = 0;
2277 	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2278 	cmd.u.mtu.mtu = mtu;
2279 
2280 	ret = ena_com_execute_admin_command(admin_queue,
2281 					    (struct ena_admin_aq_entry *)&cmd,
2282 					    sizeof(cmd),
2283 					    (struct ena_admin_acq_entry *)&resp,
2284 					    sizeof(resp));
2285 
2286 	if (unlikely(ret))
2287 		netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
2288 
2289 	return ret;
2290 }
2291 
2292 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2293 				 struct ena_admin_feature_offload_desc *offload)
2294 {
2295 	int ret;
2296 	struct ena_admin_get_feat_resp resp;
2297 
2298 	ret = ena_com_get_feature(ena_dev, &resp,
2299 				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2300 	if (unlikely(ret)) {
2301 		netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
2302 		return ret;
2303 	}
2304 
2305 	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2306 
2307 	return 0;
2308 }
2309 
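/* Apply the RSS hash function currently stored in ena_dev->rss: validate
 * that the device supports it, then issue a SET_FEATURE command whose
 * indirect control buffer points at the DMA-mapped hash key.
 */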
2310 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2311 {
2312 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2313 	struct ena_rss *rss = &ena_dev->rss;
2314 	struct ena_admin_set_feat_cmd cmd;
2315 	struct ena_admin_set_feat_resp resp;
2316 	struct ena_admin_get_feat_resp get_resp;
2317 	int ret;
2318 
2319 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
2320 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2321 			   ENA_ADMIN_RSS_HASH_FUNCTION);
2322 		return -EOPNOTSUPP;
2323 	}
2324 
2325 	/* Validate hash function is supported */
2326 	ret = ena_com_get_feature(ena_dev, &get_resp,
2327 				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2328 	if (unlikely(ret))
2329 		return ret;
2330 
2331 	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2332 		netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
2333 			   rss->hash_func);
2334 		return -EOPNOTSUPP;
2335 	}
2336 
2337 	memset(&cmd, 0x0, sizeof(cmd));
2338 
2339 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2340 	cmd.aq_common_descriptor.flags =
2341 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2342 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2343 	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2344 	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2345 
2346 	ret = ena_com_mem_addr_set(ena_dev,
2347 				   &cmd.control_buffer.address,
2348 				   rss->hash_key_dma_addr);
2349 	if (unlikely(ret)) {
2350 		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2351 		return ret;
2352 	}
2353 
2354 	cmd.control_buffer.length = sizeof(*rss->hash_key);
2355 
2356 	ret = ena_com_execute_admin_command(admin_queue,
2357 					    (struct ena_admin_aq_entry *)&cmd,
2358 					    sizeof(cmd),
2359 					    (struct ena_admin_acq_entry *)&resp,
2360 					    sizeof(resp));
2361 	if (unlikely(ret)) {
2362 		netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
2363 			   rss->hash_func, ret);
2364 		return -EINVAL;
2365 	}
2366 
2367 	return 0;
2368 }
2369 
2370 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2371 			       enum ena_admin_hash_functions func,
2372 			       const u8 *key, u16 key_len, u32 init_val)
2373 {
2374 	struct ena_admin_feature_rss_flow_hash_control *hash_key;
2375 	struct ena_admin_get_feat_resp get_resp;
2376 	enum ena_admin_hash_functions old_func;
2377 	struct ena_rss *rss = &ena_dev->rss;
2378 	int rc;
2379 
2380 	hash_key = rss->hash_key;
2381 
2382 	/* Make sure the key size is a multiple of DWORDs */
2383 	if (unlikely(key_len & 0x3))
2384 		return -EINVAL;
2385 
2386 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2387 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2388 				    rss->hash_key_dma_addr,
2389 				    sizeof(*rss->hash_key), 0);
2390 	if (unlikely(rc))
2391 		return rc;
2392 
2393 	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2394 		netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
2395 		return -EOPNOTSUPP;
2396 	}
2397 
2398 	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
2399 		if (key_len != sizeof(hash_key->key)) {
2400 			netdev_err(ena_dev->net_device,
2401 				   "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
2402 				   sizeof(hash_key->key));
2403 			return -EINVAL;
2404 		}
2405 		memcpy(hash_key->key, key, key_len);
2406 		hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2407 	}
2408 
2409 	rss->hash_init_val = init_val;
2410 	old_func = rss->hash_func;
2411 	rss->hash_func = func;
2412 	rc = ena_com_set_hash_function(ena_dev);
2413 
2414 	/* Restore the old function */
2415 	if (unlikely(rc))
2416 		rss->hash_func = old_func;
2417 
2418 	return rc;
2419 }
2420 
2421 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2422 			      enum ena_admin_hash_functions *func)
2423 {
2424 	struct ena_rss *rss = &ena_dev->rss;
2425 	struct ena_admin_get_feat_resp get_resp;
2426 	int rc;
2427 
2428 	if (unlikely(!func))
2429 		return -EINVAL;
2430 
2431 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2432 				    ENA_ADMIN_RSS_HASH_FUNCTION,
2433 				    rss->hash_key_dma_addr,
2434 				    sizeof(*rss->hash_key), 0);
2435 	if (unlikely(rc))
2436 		return rc;
2437 
2438 	/* ffs() returns 1 in case the lsb is set */
2439 	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2440 	if (rss->hash_func)
2441 		rss->hash_func--;
2442 
2443 	*func = rss->hash_func;
2444 
2445 	return 0;
2446 }
2447 
2448 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2449 {
2450 	struct ena_admin_feature_rss_flow_hash_control *hash_key =
2451 		ena_dev->rss.hash_key;
2452 
2453 	if (key)
2454 		memcpy(key, hash_key->key,
2455 		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2456 
2457 	return 0;
2458 }
2459 
2460 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2461 			  enum ena_admin_flow_hash_proto proto,
2462 			  u16 *fields)
2463 {
2464 	struct ena_rss *rss = &ena_dev->rss;
2465 	struct ena_admin_get_feat_resp get_resp;
2466 	int rc;
2467 
2468 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2469 				    ENA_ADMIN_RSS_HASH_INPUT,
2470 				    rss->hash_ctrl_dma_addr,
2471 				    sizeof(*rss->hash_ctrl), 0);
2472 	if (unlikely(rc))
2473 		return rc;
2474 
2475 	if (fields)
2476 		*fields = rss->hash_ctrl->selected_fields[proto].fields;
2477 
2478 	return 0;
2479 }
2480 
2481 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2482 {
2483 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2484 	struct ena_rss *rss = &ena_dev->rss;
2485 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2486 	struct ena_admin_set_feat_cmd cmd;
2487 	struct ena_admin_set_feat_resp resp;
2488 	int ret;
2489 
2490 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
2491 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2492 			   ENA_ADMIN_RSS_HASH_INPUT);
2493 		return -EOPNOTSUPP;
2494 	}
2495 
2496 	memset(&cmd, 0x0, sizeof(cmd));
2497 
2498 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2499 	cmd.aq_common_descriptor.flags =
2500 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2501 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2502 	cmd.u.flow_hash_input.enabled_input_sort =
2503 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2504 		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2505 
2506 	ret = ena_com_mem_addr_set(ena_dev,
2507 				   &cmd.control_buffer.address,
2508 				   rss->hash_ctrl_dma_addr);
2509 	if (unlikely(ret)) {
2510 		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2511 		return ret;
2512 	}
2513 	cmd.control_buffer.length = sizeof(*hash_ctrl);
2514 
2515 	ret = ena_com_execute_admin_command(admin_queue,
2516 					    (struct ena_admin_aq_entry *)&cmd,
2517 					    sizeof(cmd),
2518 					    (struct ena_admin_acq_entry *)&resp,
2519 					    sizeof(resp));
2520 	if (unlikely(ret))
2521 		netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
2522 
2523 	return ret;
2524 }
2525 
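/* Program a default hash-input configuration: L3/L4 fields for TCP/UDP over
 * IPv4/IPv6, L3 fields for plain IPv4/IPv6 and IPv4 fragments, and L2 fields
 * for non-IP traffic. Fails if the device doesn't support a selected field.
 */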
2526 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2527 {
2528 	struct ena_rss *rss = &ena_dev->rss;
2529 	struct ena_admin_feature_rss_hash_control *hash_ctrl =
2530 		rss->hash_ctrl;
2531 	u16 available_fields = 0;
2532 	int rc, i;
2533 
2534 	/* Get the supported hash input */
2535 	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2536 	if (unlikely(rc))
2537 		return rc;
2538 
2539 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2540 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2541 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2542 
2543 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2544 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2545 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2546 
2547 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2548 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2549 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2550 
2551 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2552 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2553 		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2554 
2555 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2556 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2557 
2558 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2559 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2560 
2561 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2562 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2563 
2564 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2565 		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2566 
2567 	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2568 		available_fields = hash_ctrl->selected_fields[i].fields &
2569 				hash_ctrl->supported_fields[i].fields;
2570 		if (available_fields != hash_ctrl->selected_fields[i].fields) {
2571 			netdev_err(ena_dev->net_device,
2572 				   "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
2573 				   i, hash_ctrl->supported_fields[i].fields,
2574 				   hash_ctrl->selected_fields[i].fields);
2575 			return -EOPNOTSUPP;
2576 		}
2577 	}
2578 
2579 	rc = ena_com_set_hash_ctrl(ena_dev);
2580 
2581 	/* In case of failure, restore the old hash ctrl */
2582 	if (unlikely(rc))
2583 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2584 
2585 	return rc;
2586 }
2587 
2588 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2589 			   enum ena_admin_flow_hash_proto proto,
2590 			   u16 hash_fields)
2591 {
2592 	struct ena_rss *rss = &ena_dev->rss;
2593 	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2594 	u16 supported_fields;
2595 	int rc;
2596 
2597 	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2598 		netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
2599 		return -EINVAL;
2600 	}
2601 
2602 	/* Get the ctrl table */
2603 	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2604 	if (unlikely(rc))
2605 		return rc;
2606 
2607 	/* Make sure all the fields are supported */
2608 	supported_fields = hash_ctrl->supported_fields[proto].fields;
2609 	if ((hash_fields & supported_fields) != hash_fields) {
2610 		netdev_err(ena_dev->net_device,
2611 			   "Proto %d doesn't support the required fields %x. supports only: %x\n",
2612 			   proto, hash_fields, supported_fields);
2613 	}
2614 
2615 	hash_ctrl->selected_fields[proto].fields = hash_fields;
2616 
2617 	rc = ena_com_set_hash_ctrl(ena_dev);
2618 
2619 	/* In case of failure, restore the old hash ctrl */
2620 	if (unlikely(rc))
2621 		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2622 
2623 	return 0;
2624 }
2625 
2626 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2627 				      u16 entry_idx, u16 entry_value)
2628 {
2629 	struct ena_rss *rss = &ena_dev->rss;
2630 
2631 	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2632 		return -EINVAL;
2633 
2634 	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2635 		return -EINVAL;
2636 
2637 	rss->host_rss_ind_tbl[entry_idx] = entry_value;
2638 
2639 	return 0;
2640 }
2641 
2642 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2643 {
2644 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2645 	struct ena_rss *rss = &ena_dev->rss;
2646 	struct ena_admin_set_feat_cmd cmd;
2647 	struct ena_admin_set_feat_resp resp;
2648 	int ret;
2649 
2650 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
2651 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2652 			   ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
2653 		return -EOPNOTSUPP;
2654 	}
2655 
2656 	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2657 	if (ret) {
2658 		netdev_err(ena_dev->net_device,
2659 			   "Failed to convert host indirection table to device table\n");
2660 		return ret;
2661 	}
2662 
2663 	memset(&cmd, 0x0, sizeof(cmd));
2664 
2665 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2666 	cmd.aq_common_descriptor.flags =
2667 		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2668 	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
2669 	cmd.u.ind_table.size = rss->tbl_log_size;
2670 	cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2671 
2672 	ret = ena_com_mem_addr_set(ena_dev,
2673 				   &cmd.control_buffer.address,
2674 				   rss->rss_ind_tbl_dma_addr);
2675 	if (unlikely(ret)) {
2676 		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2677 		return ret;
2678 	}
2679 
2680 	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2681 		sizeof(struct ena_admin_rss_ind_table_entry);
2682 
2683 	ret = ena_com_execute_admin_command(admin_queue,
2684 					    (struct ena_admin_aq_entry *)&cmd,
2685 					    sizeof(cmd),
2686 					    (struct ena_admin_acq_entry *)&resp,
2687 					    sizeof(resp));
2688 
2689 	if (unlikely(ret))
2690 		netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
2691 
2692 	return ret;
2693 }
2694 
2695 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2696 {
2697 	struct ena_rss *rss = &ena_dev->rss;
2698 	struct ena_admin_get_feat_resp get_resp;
2699 	u32 tbl_size;
2700 	int i, rc;
2701 
2702 	tbl_size = (1ULL << rss->tbl_log_size) *
2703 		sizeof(struct ena_admin_rss_ind_table_entry);
2704 
2705 	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2706 				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
2707 				    rss->rss_ind_tbl_dma_addr,
2708 				    tbl_size, 0);
2709 	if (unlikely(rc))
2710 		return rc;
2711 
2712 	if (!ind_tbl)
2713 		return 0;
2714 
2715 	for (i = 0; i < (1 << rss->tbl_log_size); i++)
2716 		ind_tbl[i] = rss->host_rss_ind_tbl[i];
2717 
2718 	return 0;
2719 }
2720 
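/* Initialize the RSS context: allocate the indirection table, then the hash
 * key (filling in the default key when supported) and the hash control
 * structures. A device without hash key/function support still gets
 * indirection-table-only RSS.
 */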
2721 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2722 {
2723 	int rc;
2724 
2725 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2726 
2727 	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2728 	if (unlikely(rc))
2729 		goto err_indr_tbl;
2730 
2731 	/* The following function might return unsupported in case the
2732 	 * device doesn't support setting the key / hash function. We can safely
2733 	 * ignore this error and have indirection table support only.
2734 	 */
2735 	rc = ena_com_hash_key_allocate(ena_dev);
2736 	if (likely(!rc))
2737 		ena_com_hash_key_fill_default_key(ena_dev);
2738 	else if (rc != -EOPNOTSUPP)
2739 		goto err_hash_key;
2740 
2741 	rc = ena_com_hash_ctrl_init(ena_dev);
2742 	if (unlikely(rc))
2743 		goto err_hash_ctrl;
2744 
2745 	return 0;
2746 
2747 err_hash_ctrl:
2748 	ena_com_hash_key_destroy(ena_dev);
2749 err_hash_key:
2750 	ena_com_indirect_table_destroy(ena_dev);
2751 err_indr_tbl:
2752 
2753 	return rc;
2754 }
2755 
2756 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2757 {
2758 	ena_com_indirect_table_destroy(ena_dev);
2759 	ena_com_hash_key_destroy(ena_dev);
2760 	ena_com_hash_ctrl_destroy(ena_dev);
2761 
2762 	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2763 }
2764 
2765 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2766 {
2767 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2768 
2769 	host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2770 						  &host_attr->host_info_dma_addr, GFP_KERNEL);
2771 	if (unlikely(!host_attr->host_info))
2772 		return -ENOMEM;
2773 
2774 	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2775 		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2776 		(ENA_COMMON_SPEC_VERSION_MINOR));
2777 
2778 	return 0;
2779 }
2780 
2781 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2782 				u32 debug_area_size)
2783 {
2784 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2785 
2786 	host_attr->debug_area_virt_addr =
2787 		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2788 				   &host_attr->debug_area_dma_addr, GFP_KERNEL);
2789 	if (unlikely(!host_attr->debug_area_virt_addr)) {
2790 		host_attr->debug_area_size = 0;
2791 		return -ENOMEM;
2792 	}
2793 
2794 	host_attr->debug_area_size = debug_area_size;
2795 
2796 	return 0;
2797 }
2798 
2799 int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
2800 {
2801 	struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
2802 
2803 	customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
2804 	customer_metrics->buffer_virt_addr = NULL;
2805 
2806 	customer_metrics->buffer_virt_addr =
2807 		dma_alloc_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
2808 				   &customer_metrics->buffer_dma_addr, GFP_KERNEL);
2809 	if (!customer_metrics->buffer_virt_addr) {
2810 		customer_metrics->buffer_len = 0;
2811 		return -ENOMEM;
2812 	}
2813 
2814 	return 0;
2815 }
2816 
2817 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2818 {
2819 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2820 
2821 	if (host_attr->host_info) {
2822 		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2823 				  host_attr->host_info_dma_addr);
2824 		host_attr->host_info = NULL;
2825 	}
2826 }
2827 
2828 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2829 {
2830 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2831 
2832 	if (host_attr->debug_area_virt_addr) {
2833 		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2834 				  host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
2835 		host_attr->debug_area_virt_addr = NULL;
2836 	}
2837 }
2838 
2839 void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
2840 {
2841 	struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
2842 
2843 	if (customer_metrics->buffer_virt_addr) {
2844 		dma_free_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
2845 				  customer_metrics->buffer_virt_addr,
2846 				  customer_metrics->buffer_dma_addr);
2847 		customer_metrics->buffer_virt_addr = NULL;
2848 		customer_metrics->buffer_len = 0;
2849 	}
2850 }
2851 
2852 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2853 {
2854 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2855 	struct ena_com_admin_queue *admin_queue;
2856 	struct ena_admin_set_feat_cmd cmd;
2857 	struct ena_admin_set_feat_resp resp;
2858 
2859 	int ret;
2860 
2861 	/* Host attribute config is called before ena_com_get_dev_attr_feat
2862 	 * so ena_com can't check if the feature is supported.
2863 	 */
2864 
2865 	memset(&cmd, 0x0, sizeof(cmd));
2866 	admin_queue = &ena_dev->admin_queue;
2867 
2868 	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2869 	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2870 
2871 	ret = ena_com_mem_addr_set(ena_dev,
2872 				   &cmd.u.host_attr.debug_ba,
2873 				   host_attr->debug_area_dma_addr);
2874 	if (unlikely(ret)) {
2875 		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2876 		return ret;
2877 	}
2878 
2879 	ret = ena_com_mem_addr_set(ena_dev,
2880 				   &cmd.u.host_attr.os_info_ba,
2881 				   host_attr->host_info_dma_addr);
2882 	if (unlikely(ret)) {
2883 		netdev_err(ena_dev->net_device, "Memory address set failed\n");
2884 		return ret;
2885 	}
2886 
2887 	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2888 
2889 	ret = ena_com_execute_admin_command(admin_queue,
2890 					    (struct ena_admin_aq_entry *)&cmd,
2891 					    sizeof(cmd),
2892 					    (struct ena_admin_acq_entry *)&resp,
2893 					    sizeof(resp));
2894 
2895 	if (unlikely(ret))
2896 		netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
2897 
2898 	return ret;
2899 }
2900 
2901 /* Interrupt moderation */
2902 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2903 {
2904 	return ena_com_check_supported_feature_id(ena_dev,
2905 						  ENA_ADMIN_INTERRUPT_MODERATION);
2906 }
2907 
2908 static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
2909 							  u32 coalesce_usecs,
2910 							  u32 intr_delay_resolution,
2911 							  u32 *intr_moder_interval)
2912 {
2913 	if (!intr_delay_resolution) {
2914 		netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
2915 		return -EFAULT;
2916 	}
2917 
2918 	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2919 
2920 	return 0;
2921 }
2922 
2923 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2924 						      u32 tx_coalesce_usecs)
2925 {
2926 	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2927 							      tx_coalesce_usecs,
2928 							      ena_dev->intr_delay_resolution,
2929 							      &ena_dev->intr_moder_tx_interval);
2930 }
2931 
2932 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2933 						      u32 rx_coalesce_usecs)
2934 {
2935 	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2936 							      rx_coalesce_usecs,
2937 							      ena_dev->intr_delay_resolution,
2938 							      &ena_dev->intr_moder_rx_interval);
2939 }
2940 
2941 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2942 {
2943 	struct ena_admin_get_feat_resp get_resp;
2944 	u16 delay_resolution;
2945 	int rc;
2946 
2947 	rc = ena_com_get_feature(ena_dev, &get_resp,
2948 				 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2949 
2950 	if (rc) {
2951 		if (rc == -EOPNOTSUPP) {
2952 			netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2953 				   ENA_ADMIN_INTERRUPT_MODERATION);
2954 			rc = 0;
2955 		} else {
2956 			netdev_err(ena_dev->net_device,
2957 				   "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
2958 		}
2959 
2960 		/* no moderation supported, disable adaptive support */
2961 		ena_com_disable_adaptive_moderation(ena_dev);
2962 		return rc;
2963 	}
2964 
2965 	/* if moderation is supported by device we set adaptive moderation */
2966 	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2967 	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2968 
2969 	/* Disable adaptive moderation by default - can be enabled later */
2970 	ena_com_disable_adaptive_moderation(ena_dev);
2971 
2972 	return 0;
2973 }
2974 
2975 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2976 {
2977 	return ena_dev->intr_moder_tx_interval;
2978 }
2979 
2980 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2981 {
2982 	return ena_dev->intr_moder_rx_interval;
2983 }
2984 
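/* Choose the TX placement policy: fall back to host memory when the device
 * exposes no LLQs, otherwise configure the LLQ parameters and derive the
 * maximum TX header size that still fits in an LLQ descriptor list entry.
 */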
2985 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2986 			    struct ena_admin_feature_llq_desc *llq_features,
2987 			    struct ena_llq_configurations *llq_default_cfg)
2988 {
2989 	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2990 	int rc;
2991 
2992 	if (!llq_features->max_llq_num) {
2993 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2994 		return 0;
2995 	}
2996 
2997 	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2998 	if (rc)
2999 		return rc;
3000 
3001 	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
3002 		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
3003 
3004 	if (unlikely(ena_dev->tx_max_header_size == 0)) {
3005 		netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
3006 		return -EINVAL;
3007 	}
3008 
3009 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
3010 
3011 	return 0;
3012 }
3013