1 // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
2 /*
3 * Copyright 2018-2026 Amazon.com, Inc. or its affiliates. All rights reserved.
4 */
5
6 #include <linux/log2.h>
7
8 #include "efa_com.h"
9 #include "efa_regs_defs.h"
10
11 #define ADMIN_CMD_TIMEOUT_US 30000000 /* usecs */
12
13 #define EFA_REG_READ_TIMEOUT_US 50000 /* usecs */
14 #define EFA_MMIO_READ_INVALID 0xffffffff
15
16 #define EFA_POLL_INTERVAL_MS 100 /* msecs */
17
18 #define EFA_ASYNC_QUEUE_DEPTH 16
19 #define EFA_ADMIN_QUEUE_DEPTH 32
20
21 #define EFA_CTRL_MAJOR 0
22 #define EFA_CTRL_MINOR 0
23 #define EFA_CTRL_SUB_MINOR 1
24
/* Lifecycle states of an admin command completion context. */
enum efa_cmd_status {
	EFA_CMD_UNUSED,    /* context is free and may be handed out */
	EFA_CMD_ALLOCATED, /* context taken, command not yet posted to the SQ */
	EFA_CMD_SUBMITTED, /* command posted, waiting for device completion */
	EFA_CMD_COMPLETED, /* completion received and copied to the caller */
};
31
/*
 * Per-command tracking state for one in-flight admin command.
 * One context exists per admin queue slot; see efa_com_init_comp_ctxt().
 */
struct efa_comp_ctx {
	struct completion wait_event;         /* signalled (interrupt mode) when the completion arrives */
	struct efa_admin_acq_entry *user_cqe; /* caller's buffer the completion entry is copied into */
	u32 comp_size;                        /* number of bytes to copy into *user_cqe */
	enum efa_cmd_status status;           /* current lifecycle state */
	u16 cmd_id;                           /* id the device echoes back in the completion */
	u8 cmd_opcode;                        /* admin opcode, kept for error reporting */
};
40
/* Map an admin command opcode to a human-readable name for log messages. */
static const char *efa_com_cmd_str(u8 cmd)
{
	switch (cmd) {
	case EFA_ADMIN_CREATE_QP:
		return "CREATE_QP";
	case EFA_ADMIN_MODIFY_QP:
		return "MODIFY_QP";
	case EFA_ADMIN_QUERY_QP:
		return "QUERY_QP";
	case EFA_ADMIN_DESTROY_QP:
		return "DESTROY_QP";
	case EFA_ADMIN_CREATE_AH:
		return "CREATE_AH";
	case EFA_ADMIN_DESTROY_AH:
		return "DESTROY_AH";
	case EFA_ADMIN_REG_MR:
		return "REG_MR";
	case EFA_ADMIN_DEREG_MR:
		return "DEREG_MR";
	case EFA_ADMIN_CREATE_CQ:
		return "CREATE_CQ";
	case EFA_ADMIN_DESTROY_CQ:
		return "DESTROY_CQ";
	case EFA_ADMIN_GET_FEATURE:
		return "GET_FEATURE";
	case EFA_ADMIN_SET_FEATURE:
		return "SET_FEATURE";
	case EFA_ADMIN_GET_STATS:
		return "GET_STATS";
	case EFA_ADMIN_ALLOC_PD:
		return "ALLOC_PD";
	case EFA_ADMIN_DEALLOC_PD:
		return "DEALLOC_PD";
	case EFA_ADMIN_ALLOC_UAR:
		return "ALLOC_UAR";
	case EFA_ADMIN_DEALLOC_UAR:
		return "DEALLOC_UAR";
	case EFA_ADMIN_CREATE_EQ:
		return "CREATE_EQ";
	case EFA_ADMIN_DESTROY_EQ:
		return "DESTROY_EQ";
	default:
		return "unknown command opcode";
	}
}
69
/* Split a DMA address into the high/low 32-bit halves used by device regs. */
void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
{
	*addr_high = upper_32_bits(addr);
	*addr_low = lower_32_bits(addr);
}
75
/*
 * Read a device register indirectly via the MMIO-read mechanism: write the
 * register offset plus a sequence number to the device, then poll the
 * DMA-coherent response buffer until the device echoes that sequence number.
 * Returns the register value, or EFA_MMIO_READ_INVALID (0xffffffff) on
 * timeout or offset mismatch.
 */
static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	struct efa_admin_mmio_req_read_less_resp *read_resp;
	unsigned long exp_time;
	u32 mmio_read_reg = 0;
	u32 err;

	read_resp = mmio_read->read_resp;

	spin_lock(&mmio_read->lock);
	mmio_read->seq_num++;

	/* trash DMA req_id to identify when hardware is done */
	read_resp->req_id = mmio_read->seq_num + 0x9aL;
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset);
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID,
		mmio_read->seq_num);

	/* trigger the device; it DMAs the response into read_resp */
	writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);

	/* busy-wait, bounded by mmio_read_timeout, for the echoed seq_num */
	exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout);
	do {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;
		udelay(1);
	} while (time_is_after_jiffies(exp_time));

	if (read_resp->req_id != mmio_read->seq_num) {
		ibdev_err_ratelimited(
			edev->efa_dev,
			"Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
			mmio_read->seq_num, offset, read_resp->req_id,
			read_resp->reg_off);
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	/* sanity-check the device answered for the offset we asked about */
	if (read_resp->reg_off != offset) {
		ibdev_err_ratelimited(
			edev->efa_dev,
			"Reading register failed: wrong offset provided\n");
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	/* 'err' doubles as the returned register value on success */
	err = read_resp->reg_val;
out:
	spin_unlock(&mmio_read->lock);
	return err;
}
127
/*
 * Allocate the admin submission queue (SQ) ring and program its DMA base
 * address, depth and entry size into the device's AQ registers.
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int efa_com_admin_init_sq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size = aq->depth * sizeof(*sq->entries);
	u32 aq_caps = 0;
	u32 addr_high;
	u32 addr_low;

	sq->entries =
		dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
	if (!sq->entries)
		return -ENOMEM;

	spin_lock_init(&sq->lock);

	/* producer/consumer counters start equal (empty ring), phase bit 1 */
	sq->cc = 0;
	sq->pc = 0;
	sq->phase = 1;

	/* doorbell the producer counter is written to after each submit */
	sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);

	addr_high = upper_32_bits(sq->dma_addr);
	addr_low = lower_32_bits(sq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);

	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aq_entry));

	writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);

	return 0;
}
164
/*
 * Allocate the admin completion queue (ACQ) ring and program its DMA base
 * address, depth, entry size and MSI-X vector into the device's ACQ registers.
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int efa_com_admin_init_cq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_cq *cq = &aq->cq;
	u16 size = aq->depth * sizeof(*cq->entries);
	u32 acq_caps = 0;
	u32 addr_high;
	u32 addr_low;

	cq->entries =
		dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
	if (!cq->entries)
		return -ENOMEM;

	spin_lock_init(&cq->lock);

	/* consumer starts at 0; expected phase bit starts at 1 */
	cq->cc = 0;
	cq->phase = 1;

	addr_high = upper_32_bits(cq->dma_addr);
	addr_low = lower_32_bits(cq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);

	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE,
		sizeof(struct efa_admin_acq_entry));
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR,
		aq->msix_vector_idx);

	writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);

	return 0;
}
200
/*
 * Allocate the asynchronous event notification queue (AENQ) ring, register
 * the caller's event handlers and program the ring parameters into the
 * device. Returns 0 on success, -EINVAL if no handlers were supplied,
 * -ENOMEM if the coherent allocation fails.
 */
static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
				   struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_aenq *aenq = &edev->aenq;
	u32 addr_low, addr_high;
	u32 aenq_caps = 0;
	u16 size;

	if (!aenq_handlers) {
		ibdev_err(edev->efa_dev, "aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries);
	aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr,
					   GFP_KERNEL);
	if (!aenq->entries)
		return -ENOMEM;

	aenq->aenq_handlers = aenq_handlers;
	aenq->depth = EFA_ASYNC_QUEUE_DEPTH;
	aenq->cc = 0;
	aenq->phase = 1;

	addr_low = lower_32_bits(aenq->dma_addr);
	addr_high = upper_32_bits(aenq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);

	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth);
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aenq_entry));
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR,
		aenq->msix_vector_idx);
	writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);

	/*
	 * Init cons_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);

	return 0;
}
246
efa_com_alloc_ctx_id(struct efa_com_admin_queue * aq)247 static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *aq)
248 {
249 u16 ctx_id;
250
251 spin_lock(&aq->comp_ctx_lock);
252 ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
253 aq->comp_ctx_pool_next++;
254 spin_unlock(&aq->comp_ctx_lock);
255
256 return ctx_id;
257 }
258
/* Push a completion-context id back onto the free pool (lock-protected). */
static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
				   u16 ctx_id)
{
	spin_lock(&aq->comp_ctx_lock);
	aq->comp_ctx_pool[--aq->comp_ctx_pool_next] = ctx_id;
	spin_unlock(&aq->comp_ctx_lock);
}
267
efa_com_alloc_comp_ctx(struct efa_com_admin_queue * aq)268 static struct efa_comp_ctx *efa_com_alloc_comp_ctx(struct efa_com_admin_queue *aq)
269 {
270 struct efa_comp_ctx *comp_ctx;
271 u16 ctx_id;
272
273 ctx_id = efa_com_alloc_ctx_id(aq);
274
275 comp_ctx = &aq->comp_ctx[ctx_id];
276 if (comp_ctx->status != EFA_CMD_UNUSED) {
277 efa_com_dealloc_ctx_id(aq, ctx_id);
278 ibdev_err_ratelimited(aq->efa_dev,
279 "Completion context[%u] is used[%u]\n",
280 ctx_id, comp_ctx->status);
281 return NULL;
282 }
283
284 comp_ctx->status = EFA_CMD_ALLOCATED;
285 ibdev_dbg(aq->efa_dev, "Take completion context[%u]\n", ctx_id);
286 return comp_ctx;
287 }
288
efa_com_get_comp_ctx_id(struct efa_com_admin_queue * aq,struct efa_comp_ctx * comp_ctx)289 static inline u16 efa_com_get_comp_ctx_id(struct efa_com_admin_queue *aq,
290 struct efa_comp_ctx *comp_ctx)
291 {
292 return comp_ctx - aq->comp_ctx;
293 }
294
efa_com_dealloc_comp_ctx(struct efa_com_admin_queue * aq,struct efa_comp_ctx * comp_ctx)295 static inline void efa_com_dealloc_comp_ctx(struct efa_com_admin_queue *aq,
296 struct efa_comp_ctx *comp_ctx)
297 {
298 u16 ctx_id = efa_com_get_comp_ctx_id(aq, comp_ctx);
299
300 ibdev_dbg(aq->efa_dev, "Put completion context[%u]\n", ctx_id);
301 comp_ctx->status = EFA_CMD_UNUSED;
302 efa_com_dealloc_ctx_id(aq, ctx_id);
303 }
304
/* The low log2(depth) bits of a command id are its context index. */
static inline struct efa_comp_ctx *efa_com_get_comp_ctx_by_cmd_id(struct efa_com_admin_queue *aq,
								  u16 cmd_id)
{
	return &aq->comp_ctx[cmd_id & (aq->depth - 1)];
}
312
/*
 * Copy an admin command into the next SQ slot and ring the doorbell.
 * Caller must hold aq->sq.lock. The completion context is fully populated
 * and moved to SUBMITTED before the doorbell write, so the completion path
 * can always match the response against it.
 */
static void __efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
				       struct efa_comp_ctx *comp_ctx,
				       struct efa_admin_aq_entry *cmd,
				       size_t cmd_size_in_bytes,
				       struct efa_admin_acq_entry *comp,
				       size_t comp_size_in_bytes)
{
	struct efa_admin_aq_entry *aqe;
	u16 queue_size_mask;
	u16 cmd_id;
	u16 ctx_id;
	u16 pi;

	queue_size_mask = aq->depth - 1; /* depth is a power of two */
	pi = aq->sq.pc & queue_size_mask;
	ctx_id = efa_com_get_comp_ctx_id(aq, comp_ctx);

	/* cmd_id LSBs are the ctx_id and MSBs are entropy bits from pc */
	cmd_id = ctx_id & queue_size_mask;
	cmd_id |= aq->sq.pc << ilog2(aq->depth);
	cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	cmd->aq_common_descriptor.command_id = cmd_id;
	EFA_SET(&cmd->aq_common_descriptor.flags,
		EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);

	comp_ctx->status = EFA_CMD_SUBMITTED;
	comp_ctx->comp_size = comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
	comp_ctx->cmd_id = cmd_id;

	/* re-arm so a previous completion of this slot cannot be seen */
	reinit_completion(&comp_ctx->wait_event);

	/* zero the slot first: the command may be smaller than the entry */
	aqe = &aq->sq.entries[pi];
	memset(aqe, 0, sizeof(*aqe));
	memcpy(aqe, cmd, cmd_size_in_bytes);

	aq->sq.pc++;
	atomic64_inc(&aq->stats.submitted_cmd);

	/* flip the phase bit each time the producer counter wraps */
	if ((aq->sq.pc & queue_size_mask) == 0)
		aq->sq.phase = !aq->sq.phase;

	/* barrier not needed in case of writel */
	writel(aq->sq.pc, aq->sq.db_addr);
}
360
efa_com_init_comp_ctxt(struct efa_com_admin_queue * aq)361 static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
362 {
363 size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool);
364 size_t size = aq->depth * sizeof(struct efa_comp_ctx);
365 struct efa_comp_ctx *comp_ctx;
366 u16 i;
367
368 aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
369 aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL);
370 if (!aq->comp_ctx || !aq->comp_ctx_pool) {
371 devm_kfree(aq->dmadev, aq->comp_ctx_pool);
372 devm_kfree(aq->dmadev, aq->comp_ctx);
373 return -ENOMEM;
374 }
375
376 for (i = 0; i < aq->depth; i++) {
377 comp_ctx = &aq->comp_ctx[i];
378 comp_ctx->status = EFA_CMD_UNUSED;
379 init_completion(&comp_ctx->wait_event);
380
381 aq->comp_ctx_pool[i] = i;
382 }
383
384 spin_lock_init(&aq->comp_ctx_lock);
385
386 aq->comp_ctx_pool_next = 0;
387
388 return 0;
389 }
390
/*
 * Submit an admin command under the SQ lock, refusing if the admin queue
 * has been shut down. Returns 0 on success, -ENODEV if the queue is closed.
 */
static int efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
				    struct efa_comp_ctx *comp_ctx,
				    struct efa_admin_aq_entry *cmd,
				    size_t cmd_size_in_bytes,
				    struct efa_admin_acq_entry *comp,
				    size_t comp_size_in_bytes)
{
	int err = 0;

	spin_lock(&aq->sq.lock);
	if (test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
		__efa_com_submit_admin_cmd(aq, comp_ctx, cmd,
					   cmd_size_in_bytes, comp,
					   comp_size_in_bytes);
	} else {
		ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
		err = -ENODEV;
	}
	spin_unlock(&aq->sq.lock);

	return err;
}
411
/*
 * Match one ACQ entry to its submitted command context, copy the completion
 * into the caller's buffer and (in interrupt mode) wake the waiter.
 * Returns 0 on success, -EINVAL when the id does not match a SUBMITTED
 * context. Caller must hold aq->cq.lock.
 */
static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
						  struct efa_admin_acq_entry *cqe)
{
	struct efa_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = EFA_GET(&cqe->acq_common_descriptor.command,
			 EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);

	comp_ctx = efa_com_get_comp_ctx_by_cmd_id(aq, cmd_id);
	/* cmd_id check guards against a stale context reused by the slot */
	if (comp_ctx->status != EFA_CMD_SUBMITTED || comp_ctx->cmd_id != cmd_id) {
		ibdev_err(aq->efa_dev,
			  "Received completion with unexpected command id[%x], status[%d] sq producer[%d], sq consumer[%d], cq consumer[%d]\n",
			  cmd_id, comp_ctx->status, aq->sq.pc, aq->sq.cc,
			  aq->cq.cc);
		return -EINVAL;
	}

	/* copy out before marking COMPLETED is not needed here: the waiter
	 * only runs after complete()/polling observes the status change below
	 */
	comp_ctx->status = EFA_CMD_COMPLETED;
	memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);

	/* in polling mode the submitter polls status itself; no wakeup */
	if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
		complete(&comp_ctx->wait_event);

	return 0;
}
438
/*
 * Drain every available ACQ completion. Valid entries are detected via the
 * phase bit: the device writes the current phase into each entry and the
 * expected phase flips every time the consumer wraps the ring.
 * Caller must hold aq->cq.lock.
 */
static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
{
	struct efa_admin_acq_entry *cqe;
	u16 queue_size_mask;
	u16 comp_cmds = 0;
	u8 phase;
	int err;
	u16 ci;

	queue_size_mask = aq->depth - 1;

	ci = aq->cq.cc & queue_size_mask;
	phase = aq->cq.phase;

	cqe = &aq->cq.entries[ci];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		err = efa_com_handle_single_admin_completion(aq, cqe);
		/* count only matched completions towards the SQ consumer */
		if (!err)
			comp_cmds++;

		aq->cq.cc++;
		ci++;
		if (ci == aq->depth) {
			/* wrapped: expect the opposite phase from now on */
			ci = 0;
			phase = !phase;
		}

		cqe = &aq->cq.entries[ci];
	}

	aq->cq.phase = phase;
	aq->sq.cc += comp_cmds;
	atomic64_add(comp_cmds, &aq->stats.completed_cmd);
}
481
/* Translate a device admin completion status into a kernel errno. */
static int efa_com_comp_status_to_errno(u8 comp_status)
{
	switch (comp_status) {
	case EFA_ADMIN_SUCCESS:
		return 0;
	case EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case EFA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	default:
		/*
		 * BAD_OPCODE, MALFORMED_REQUEST, ILLEGAL_PARAMETER,
		 * UNKNOWN_ERROR and any unrecognized status map to -EINVAL.
		 */
		return -EINVAL;
	}
}
500
/*
 * Polling-mode wait: repeatedly drain the ACQ ourselves until this command's
 * context leaves the SUBMITTED state or the admin timeout expires.
 * On timeout the admin queue is marked not-running and -ETIME is returned;
 * otherwise the device's completion status is translated to an errno.
 */
static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_ctx,
						     struct efa_com_admin_queue *aq)
{
	unsigned long timeout;
	unsigned long flags;

	timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);

	while (1) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		/* handle_admin_completion() advances status on a match */
		if (comp_ctx->status != EFA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			ibdev_err_ratelimited(
				aq->efa_dev,
				"Wait for completion (polling) timeout\n");
			/* EFA didn't have any completion */
			atomic64_inc(&aq->stats.no_completion);

			/* stop accepting new commands on a dead queue */
			clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
			return -ETIME;
		}

		msleep(aq->poll_interval);
	}

	return efa_com_comp_status_to_errno(
		comp_ctx->user_cqe->acq_common_descriptor.status);
}
534
/*
 * Interrupt-mode wait: block on the context's completion object until the
 * IRQ handler signals it or the admin timeout expires. On timeout, drain the
 * ACQ once more to distinguish "no completion" from "completion arrived but
 * the MSI-X interrupt was lost"; either way the queue is marked not-running
 * and -ETIME is returned.
 */
static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx,
							struct efa_com_admin_queue *aq)
{
	unsigned long flags;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(aq->completion_timeout));

	/*
	 * In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (comp_ctx->status == EFA_CMD_SUBMITTED) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		atomic64_inc(&aq->stats.no_completion);

		if (comp_ctx->status == EFA_CMD_COMPLETED)
			ibdev_err_ratelimited(
				aq->efa_dev,
				"The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				efa_com_cmd_str(comp_ctx->cmd_opcode),
				comp_ctx->cmd_opcode, comp_ctx->status,
				comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
				aq->cq.cc);
		else
			ibdev_err_ratelimited(
				aq->efa_dev,
				"The device didn't send any completion for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				efa_com_cmd_str(comp_ctx->cmd_opcode),
				comp_ctx->cmd_opcode, comp_ctx->status,
				comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
				aq->cq.cc);

		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
		return -ETIME;
	}

	return efa_com_comp_status_to_errno(
		comp_ctx->user_cqe->acq_common_descriptor.status);
}
580
581 /*
582 * There are two types to wait for completion.
583 * Polling mode - wait until the completion is available.
584 * Async mode - wait on wait queue until the completion is ready
585 * (or the timeout expired).
586 * It is expected that the IRQ called efa_com_handle_admin_completion
587 * to mark the completions.
588 */
efa_com_wait_and_process_admin_cq(struct efa_comp_ctx * comp_ctx,struct efa_com_admin_queue * aq)589 static int efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx,
590 struct efa_com_admin_queue *aq)
591 {
592 if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
593 return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);
594
595 return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
596 }
597
598 /**
599 * efa_com_cmd_exec - Execute admin command
600 * @aq: admin queue.
601 * @cmd: the admin command to execute.
602 * @cmd_size: the command size.
603 * @comp: command completion return entry.
604 * @comp_size: command completion size.
605 * Submit an admin command and then wait until the device will return a
606 * completion.
607 * The completion will be copied into comp.
608 *
609 * @return - 0 on success, negative value on failure.
610 */
efa_com_cmd_exec(struct efa_com_admin_queue * aq,struct efa_admin_aq_entry * cmd,size_t cmd_size,struct efa_admin_acq_entry * comp,size_t comp_size)611 int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
612 struct efa_admin_aq_entry *cmd,
613 size_t cmd_size,
614 struct efa_admin_acq_entry *comp,
615 size_t comp_size)
616 {
617 struct efa_comp_ctx *comp_ctx;
618 int err;
619
620 might_sleep();
621
622 /* In case of queue FULL */
623 down(&aq->avail_cmds);
624
625 ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
626 efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
627 cmd->aq_common_descriptor.opcode);
628
629 comp_ctx = efa_com_alloc_comp_ctx(aq);
630 if (!comp_ctx) {
631 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
632 up(&aq->avail_cmds);
633 return -EINVAL;
634 }
635
636 err = efa_com_submit_admin_cmd(aq, comp_ctx, cmd, cmd_size, comp, comp_size);
637 if (err) {
638 ibdev_err_ratelimited(
639 aq->efa_dev,
640 "Failed to submit command %s (opcode %u) err %d\n",
641 efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
642 cmd->aq_common_descriptor.opcode, err);
643
644 efa_com_dealloc_comp_ctx(aq, comp_ctx);
645 up(&aq->avail_cmds);
646 atomic64_inc(&aq->stats.cmd_err);
647 return err;
648 }
649
650 err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
651 if (err) {
652 ibdev_err_ratelimited(
653 aq->efa_dev,
654 "Failed to process command %s (opcode %u) err %d\n",
655 efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
656 cmd->aq_common_descriptor.opcode, err);
657 atomic64_inc(&aq->stats.cmd_err);
658 }
659
660 efa_com_dealloc_comp_ctx(aq, comp_ctx);
661 up(&aq->avail_cmds);
662
663 return err;
664 }
665
666 /**
667 * efa_com_admin_destroy - Destroy the admin and the async events queues.
668 * @edev: EFA communication layer struct
669 */
efa_com_admin_destroy(struct efa_com_dev * edev)670 void efa_com_admin_destroy(struct efa_com_dev *edev)
671 {
672 struct efa_com_admin_queue *aq = &edev->aq;
673 struct efa_com_aenq *aenq = &edev->aenq;
674 struct efa_com_admin_cq *cq = &aq->cq;
675 struct efa_com_admin_sq *sq = &aq->sq;
676 u16 size;
677
678 clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
679
680 devm_kfree(edev->dmadev, aq->comp_ctx_pool);
681 devm_kfree(edev->dmadev, aq->comp_ctx);
682
683 size = aq->depth * sizeof(*sq->entries);
684 dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr);
685
686 size = aq->depth * sizeof(*cq->entries);
687 dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr);
688
689 size = aenq->depth * sizeof(*aenq->entries);
690 dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr);
691 }
692
693 /**
694 * efa_com_set_admin_polling_mode - Set the admin completion queue polling mode
695 * @edev: EFA communication layer struct
696 * @polling: Enable/Disable polling mode
697 *
698 * Set the admin completion mode.
699 */
efa_com_set_admin_polling_mode(struct efa_com_dev * edev,bool polling)700 void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling)
701 {
702 u32 mask_value = 0;
703
704 if (polling)
705 EFA_SET(&mask_value, EFA_REGS_INTR_MASK_EN, 1);
706
707 writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF);
708 if (polling)
709 set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
710 else
711 clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
712 }
713
efa_com_stats_init(struct efa_com_dev * edev)714 static void efa_com_stats_init(struct efa_com_dev *edev)
715 {
716 atomic64_t *s = (atomic64_t *)&edev->aq.stats;
717 int i;
718
719 for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
720 atomic64_set(s, 0);
721 }
722
723 /**
724 * efa_com_admin_init - Init the admin and the async queues
725 * @edev: EFA communication layer struct
726 * @aenq_handlers: Those handlers to be called upon event.
727 *
728 * Initialize the admin submission and completion queues.
729 * Initialize the asynchronous events notification queues.
730 *
731 * @return - 0 on success, negative value on failure.
732 */
efa_com_admin_init(struct efa_com_dev * edev,struct efa_aenq_handlers * aenq_handlers)733 int efa_com_admin_init(struct efa_com_dev *edev,
734 struct efa_aenq_handlers *aenq_handlers)
735 {
736 struct efa_com_admin_queue *aq = &edev->aq;
737 u32 timeout;
738 u32 dev_sts;
739 u32 cap;
740 int err;
741
742 dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
743 if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) {
744 ibdev_err(edev->efa_dev,
745 "Device isn't ready, abort com init %#x\n", dev_sts);
746 return -ENODEV;
747 }
748
749 aq->depth = EFA_ADMIN_QUEUE_DEPTH;
750
751 aq->dmadev = edev->dmadev;
752 aq->efa_dev = edev->efa_dev;
753 set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state);
754
755 sema_init(&aq->avail_cmds, aq->depth);
756
757 efa_com_stats_init(edev);
758
759 err = efa_com_init_comp_ctxt(aq);
760 if (err)
761 return err;
762
763 err = efa_com_admin_init_sq(edev);
764 if (err)
765 goto err_destroy_comp_ctxt;
766
767 err = efa_com_admin_init_cq(edev);
768 if (err)
769 goto err_destroy_sq;
770
771 efa_com_set_admin_polling_mode(edev, false);
772
773 err = efa_com_admin_init_aenq(edev, aenq_handlers);
774 if (err)
775 goto err_destroy_cq;
776
777 cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
778 timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
779 if (timeout)
780 /* the resolution of timeout reg is 100ms */
781 aq->completion_timeout = timeout * 100000;
782 else
783 aq->completion_timeout = ADMIN_CMD_TIMEOUT_US;
784
785 aq->poll_interval = EFA_POLL_INTERVAL_MS;
786
787 set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
788
789 return 0;
790
791 err_destroy_cq:
792 dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
793 aq->cq.entries, aq->cq.dma_addr);
794 err_destroy_sq:
795 dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
796 aq->sq.entries, aq->sq.dma_addr);
797 err_destroy_comp_ctxt:
798 devm_kfree(edev->dmadev, aq->comp_ctx);
799
800 return err;
801 }
802
803 /**
804 * efa_com_admin_q_comp_intr_handler - admin queue interrupt handler
805 * @edev: EFA communication layer struct
806 *
807 * This method goes over the admin completion queue and wakes up
808 * all the pending threads that wait on the commands wait event.
809 *
810 * Note: Should be called after MSI-X interrupt.
811 */
efa_com_admin_q_comp_intr_handler(struct efa_com_dev * edev)812 void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
813 {
814 unsigned long flags;
815
816 spin_lock_irqsave(&edev->aq.cq.lock, flags);
817 efa_com_handle_admin_completion(&edev->aq);
818 spin_unlock_irqrestore(&edev->aq.cq.lock, flags);
819 }
820
821 /*
822 * efa_handle_specific_aenq_event:
823 * return the handler that is relevant to the specific event group
824 */
efa_com_get_specific_aenq_cb(struct efa_com_dev * edev,u16 group)825 static efa_aenq_handler efa_com_get_specific_aenq_cb(struct efa_com_dev *edev,
826 u16 group)
827 {
828 struct efa_aenq_handlers *aenq_handlers = edev->aenq.aenq_handlers;
829
830 if (group < EFA_MAX_HANDLERS && aenq_handlers->handlers[group])
831 return aenq_handlers->handlers[group];
832
833 return aenq_handlers->unimplemented_handler;
834 }
835
836 /**
837 * efa_com_aenq_intr_handler - AENQ interrupt handler
838 * @edev: EFA communication layer struct
839 * @data: Data of interrupt handler.
840 *
841 * Go over the async event notification queue and call the proper aenq handler.
842 */
efa_com_aenq_intr_handler(struct efa_com_dev * edev,void * data)843 void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data)
844 {
845 struct efa_admin_aenq_common_desc *aenq_common;
846 struct efa_com_aenq *aenq = &edev->aenq;
847 struct efa_admin_aenq_entry *aenq_e;
848 efa_aenq_handler handler_cb;
849 u32 processed = 0;
850 u8 phase;
851 u32 ci;
852
853 ci = aenq->cc & (aenq->depth - 1);
854 phase = aenq->phase;
855 aenq_e = &aenq->entries[ci]; /* Get first entry */
856 aenq_common = &aenq_e->aenq_common_desc;
857
858 /* Go over all the events */
859 while ((READ_ONCE(aenq_common->flags) &
860 EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
861 /*
862 * Do not read the rest of the completion entry before the
863 * phase bit was validated
864 */
865 dma_rmb();
866
867 /* Handle specific event*/
868 handler_cb = efa_com_get_specific_aenq_cb(edev,
869 aenq_common->group);
870 handler_cb(data, aenq_e); /* call the actual event handler*/
871
872 /* Get next event entry */
873 ci++;
874 processed++;
875
876 if (ci == aenq->depth) {
877 ci = 0;
878 phase = !phase;
879 }
880 aenq_e = &aenq->entries[ci];
881 aenq_common = &aenq_e->aenq_common_desc;
882 }
883
884 aenq->cc += processed;
885 aenq->phase = phase;
886
887 /* Don't update aenq doorbell if there weren't any processed events */
888 if (!processed)
889 return;
890
891 /* barrier not needed in case of writel */
892 writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
893 }
894
efa_com_mmio_reg_read_resp_addr_init(struct efa_com_dev * edev)895 static void efa_com_mmio_reg_read_resp_addr_init(struct efa_com_dev *edev)
896 {
897 struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
898 u32 addr_high;
899 u32 addr_low;
900
901 /* dma_addr_bits is unknown at this point */
902 addr_high = (mmio_read->read_resp_dma_addr >> 32) & GENMASK(31, 0);
903 addr_low = mmio_read->read_resp_dma_addr & GENMASK(31, 0);
904
905 writel(addr_high, edev->reg_bar + EFA_REGS_MMIO_RESP_HI_OFF);
906 writel(addr_low, edev->reg_bar + EFA_REGS_MMIO_RESP_LO_OFF);
907 }
908
efa_com_mmio_reg_read_init(struct efa_com_dev * edev)909 int efa_com_mmio_reg_read_init(struct efa_com_dev *edev)
910 {
911 struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
912
913 spin_lock_init(&mmio_read->lock);
914 mmio_read->read_resp =
915 dma_alloc_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
916 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
917 if (!mmio_read->read_resp)
918 return -ENOMEM;
919
920 efa_com_mmio_reg_read_resp_addr_init(edev);
921
922 mmio_read->read_resp->req_id = 0;
923 mmio_read->seq_num = 0;
924 mmio_read->mmio_read_timeout = EFA_REG_READ_TIMEOUT_US;
925
926 return 0;
927 }
928
efa_com_mmio_reg_read_destroy(struct efa_com_dev * edev)929 void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)
930 {
931 struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
932
933 dma_free_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
934 mmio_read->read_resp, mmio_read->read_resp_dma_addr);
935 }
936
/*
 * Check that the device's reported EFA version and controller version are
 * at least the minimum this driver supports. Returns 0 when compatible,
 * -EOPNOTSUPP otherwise.
 */
int efa_com_validate_version(struct efa_com_dev *edev)
{
	u32 min_ctrl_ver = 0;
	u32 ctrl_ver_masked;
	u32 min_ver = 0;
	u32 ctrl_ver;
	u32 ver;

	/*
	 * Make sure the EFA version and the controller version are at least
	 * as the driver expects
	 */
	ver = efa_com_reg_read32(edev, EFA_REGS_VERSION_OFF);
	ctrl_ver = efa_com_reg_read32(edev,
				      EFA_REGS_CONTROLLER_VERSION_OFF);

	ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
		  EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION),
		  EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION));

	/* build the minimum supported version in the same register layout
	 * so the two values can be compared as plain integers
	 */
	EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION,
		EFA_ADMIN_API_VERSION_MAJOR);
	EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION,
		EFA_ADMIN_API_VERSION_MINOR);
	if (ver < min_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA version is lower than the minimal version the driver supports\n");
		return -EOPNOTSUPP;
	}

	ibdev_dbg(
		edev->efa_dev,
		"efa controller version: %d.%d.%d implementation version %d\n",
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION),
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID));

	ctrl_ver_masked =
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) |
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) |
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION);

	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION,
		EFA_CTRL_MAJOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION,
		EFA_CTRL_MINOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION,
		EFA_CTRL_SUB_MINOR);
	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < min_ctrl_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
997
998 /**
999 * efa_com_get_dma_width - Retrieve physical dma address width the device
1000 * supports.
1001 * @edev: EFA communication layer struct
1002 *
1003 * Retrieve the maximum physical address bits the device can handle.
1004 *
1005 * @return: > 0 on Success and negative value otherwise.
1006 */
efa_com_get_dma_width(struct efa_com_dev * edev)1007 int efa_com_get_dma_width(struct efa_com_dev *edev)
1008 {
1009 u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
1010 int width;
1011
1012 width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH);
1013
1014 ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);
1015
1016 if (width < 32 || width > 64) {
1017 ibdev_err(edev->efa_dev, "DMA width illegal value: %d\n", width);
1018 return -EINVAL;
1019 }
1020
1021 edev->dma_addr_bits = width;
1022
1023 return width;
1024 }
1025
/*
 * Poll the device status register until the RESET_IN_PROGRESS bit matches
 * @on, sleeping EFA_POLL_INTERVAL_MS between reads, for at most @timeout
 * iterations.
 *
 * Return: 0 once the bit matches, -ETIME if it never does.
 */
static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on)
{
	u32 attempt = 0;
	u32 sts;

	while (attempt++ < timeout) {
		sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
		if (EFA_GET(&sts, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on)
			return 0;

		ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", sts);
		msleep(EFA_POLL_INTERVAL_MS);
	}

	return -ETIME;
}
1042
/**
 * efa_com_dev_reset - Perform device FLR to the device.
 * @edev: EFA communication layer struct
 * @reset_reason: Specify what is the trigger for the reset in case of an error.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_dev_reset(struct efa_com_dev *edev,
		      enum efa_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap;
	u32 reset_val = 0;
	int err;

	stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);

	/* Refuse to reset a device that has not reported itself ready. */
	if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	/* The reset timeout is advertised by the device in its caps register. */
	timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT);
	if (!timeout) {
		ibdev_err(edev->efa_dev, "Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset: set the reset bit and record the reason in one write */
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1);
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason);
	writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);

	/* reset clears the mmio readless address, restore it */
	efa_com_mmio_reg_read_resp_addr_init(edev);

	/* Wait for the device to acknowledge the reset has started... */
	err = wait_for_reset_state(edev, timeout, 1);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
		return err;
	}

	/* reset done: clear the control register and wait for completion */
	writel(0, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
	err = wait_for_reset_state(edev, timeout, 0);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn off\n");
		return err;
	}

	/* Re-derive the admin command timeout from caps after the reset. */
	timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		edev->aq.completion_timeout = timeout * 100000;
	else
		edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
1103
efa_com_create_eq(struct efa_com_dev * edev,struct efa_com_create_eq_params * params,struct efa_com_create_eq_result * result)1104 static int efa_com_create_eq(struct efa_com_dev *edev,
1105 struct efa_com_create_eq_params *params,
1106 struct efa_com_create_eq_result *result)
1107 {
1108 struct efa_com_admin_queue *aq = &edev->aq;
1109 struct efa_admin_create_eq_resp resp = {};
1110 struct efa_admin_create_eq_cmd cmd = {};
1111 int err;
1112
1113 cmd.aq_common_descriptor.opcode = EFA_ADMIN_CREATE_EQ;
1114 EFA_SET(&cmd.caps, EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS,
1115 params->entry_size_in_bytes / 4);
1116 cmd.depth = params->depth;
1117 cmd.event_bitmask = params->event_bitmask;
1118 cmd.msix_vec = params->msix_vec;
1119
1120 efa_com_set_dma_addr(params->dma_addr, &cmd.ba.mem_addr_high,
1121 &cmd.ba.mem_addr_low);
1122
1123 err = efa_com_cmd_exec(aq,
1124 (struct efa_admin_aq_entry *)&cmd,
1125 sizeof(cmd),
1126 (struct efa_admin_acq_entry *)&resp,
1127 sizeof(resp));
1128 if (err) {
1129 ibdev_err_ratelimited(edev->efa_dev,
1130 "Failed to create eq[%d]\n", err);
1131 return err;
1132 }
1133
1134 result->eqn = resp.eqn;
1135
1136 return 0;
1137 }
1138
efa_com_destroy_eq(struct efa_com_dev * edev,struct efa_com_destroy_eq_params * params)1139 static void efa_com_destroy_eq(struct efa_com_dev *edev,
1140 struct efa_com_destroy_eq_params *params)
1141 {
1142 struct efa_com_admin_queue *aq = &edev->aq;
1143 struct efa_admin_destroy_eq_resp resp = {};
1144 struct efa_admin_destroy_eq_cmd cmd = {};
1145 int err;
1146
1147 cmd.aq_common_descriptor.opcode = EFA_ADMIN_DESTROY_EQ;
1148 cmd.eqn = params->eqn;
1149
1150 err = efa_com_cmd_exec(aq,
1151 (struct efa_admin_aq_entry *)&cmd,
1152 sizeof(cmd),
1153 (struct efa_admin_acq_entry *)&resp,
1154 sizeof(resp));
1155 if (err)
1156 ibdev_err_ratelimited(edev->efa_dev,
1157 "Failed to destroy EQ-%u [%d]\n", cmd.eqn,
1158 err);
1159 }
1160
efa_com_arm_eq(struct efa_com_dev * edev,struct efa_com_eq * eeq)1161 static void efa_com_arm_eq(struct efa_com_dev *edev, struct efa_com_eq *eeq)
1162 {
1163 u32 val = 0;
1164
1165 EFA_SET(&val, EFA_REGS_EQ_DB_EQN, eeq->eqn);
1166 EFA_SET(&val, EFA_REGS_EQ_DB_ARM, 1);
1167
1168 writel(val, edev->reg_bar + EFA_REGS_EQ_DB_OFF);
1169 }
1170
/*
 * Drain all pending entries from @eeq: walk the EQ ring from the current
 * consumer index, invoking eeq->cb() for each entry whose phase bit matches
 * the expected phase, then update the consumer count/phase and re-arm the EQ
 * doorbell. The phase bit flips each time the ring wraps, which is how a
 * valid new entry is distinguished from a stale one.
 */
void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
				  struct efa_com_eq *eeq)
{
	struct efa_admin_eqe *eqe;
	u32 processed = 0;
	u8 phase;
	u32 ci;

	/* eeq->depth is a power of two, so masking yields the ring index */
	ci = eeq->cc & (eeq->depth - 1);
	phase = eeq->phase;
	eqe = &eeq->eqes[ci];

	/* Go over all the events */
	while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();

		eeq->cb(eeq, eqe);

		/* Get next event entry */
		ci++;
		processed++;

		/* On wrap-around, the expected phase bit flips */
		if (ci == eeq->depth) {
			ci = 0;
			phase = !phase;
		}

		eqe = &eeq->eqes[ci];
	}

	eeq->cc += processed;
	eeq->phase = phase;
	efa_com_arm_eq(eeq->edev, eeq);
}
1209
efa_com_eq_destroy(struct efa_com_dev * edev,struct efa_com_eq * eeq)1210 void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq)
1211 {
1212 struct efa_com_destroy_eq_params params = {
1213 .eqn = eeq->eqn,
1214 };
1215
1216 efa_com_destroy_eq(edev, ¶ms);
1217 dma_free_coherent(edev->dmadev, eeq->depth * sizeof(*eeq->eqes),
1218 eeq->eqes, eeq->dma_addr);
1219 }
1220
/*
 * Allocate the EQ ring, create the EQ on the device subscribed to completion
 * events on @msix_vec, fill in @eeq's bookkeeping fields, and arm it.
 *
 * Return: 0 on success; -ENOMEM on allocation failure or the admin command's
 * negative errno, in which case the ring memory is freed.
 */
int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
		    efa_eqe_handler cb, u16 depth, u8 msix_vec)
{
	struct efa_com_create_eq_params params = {};
	struct efa_com_create_eq_result result = {};
	size_t ring_size = depth * sizeof(*eeq->eqes);
	int err;

	params.depth = depth;
	params.entry_size_in_bytes = sizeof(*eeq->eqes);
	params.msix_vec = msix_vec;
	EFA_SET(&params.event_bitmask,
		EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS, 1);

	eeq->eqes = dma_alloc_coherent(edev->dmadev, ring_size,
				       &params.dma_addr, GFP_KERNEL);
	if (!eeq->eqes)
		return -ENOMEM;

	err = efa_com_create_eq(edev, &params, &result);
	if (err)
		goto err_free_eqes;

	eeq->eqn = result.eqn;
	eeq->edev = edev;
	eeq->dma_addr = params.dma_addr;
	/* A fresh ring starts with the phase bit expected to be 1 */
	eeq->phase = 1;
	eeq->depth = params.depth;
	eeq->cb = cb;
	efa_com_arm_eq(edev, eeq);

	return 0;

err_free_eqes:
	dma_free_coherent(edev->dmadev, ring_size, eeq->eqes,
			  params.dma_addr);
	return err;
}
1259