xref: /linux/drivers/infiniband/hw/ionic/ionic_admin.c (revision 6093a688a07da07808f0122f9aa2a3eed250d853)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
3 
4 #include <linux/interrupt.h>
5 #include <linux/module.h>
6 #include <linux/printk.h>
7 
8 #include "ionic_fw.h"
9 #include "ionic_ibdev.h"
10 
11 #define IONIC_EQ_COUNT_MIN	4
12 #define IONIC_AQ_COUNT_MIN	1
13 
14 /* sentinel value: neither a valid queue position nor a negative error status */
15 #define IONIC_ADMIN_POSTED	0x10000
16 
17 /* the cpu can be held with irq disabled for up to COUNT * MS (for create/destroy_ah) */
18 #define IONIC_ADMIN_BUSY_RETRY_COUNT	2000
19 #define IONIC_ADMIN_BUSY_RETRY_MS	1
20 
21 /* the admin queue is considered failed if a command takes longer than this */
22 #define IONIC_ADMIN_TIMEOUT	(HZ * 2)
23 #define IONIC_ADMIN_WARN	(HZ / 8)
24 
25 /* poll the admin cq at this interval to tolerate and report a missed event */
26 #define IONIC_ADMIN_DELAY	(HZ / 8)
27 
28 /* work queue for polling the event queue and admin cq */
29 struct workqueue_struct *ionic_evt_workq;
30 
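/*
 * An admin command timed out: schedule a reset of all adminqs (unless they
 * are already killed) and dump the last posted WQE for debugging.
 */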
31 static void ionic_admin_timedout(struct ionic_aq *aq)
32 {
33 	struct ionic_ibdev *dev = aq->dev;
34 	unsigned long irqflags;
35 	u16 pos;
36 
37 	spin_lock_irqsave(&aq->lock, irqflags);
38 	if (ionic_queue_empty(&aq->q))
39 		goto out;
40 
41 	/* Reset ALL adminqs if any one of them times out */
42 	if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
43 		queue_work(ionic_evt_workq, &dev->reset_work);
44 
45 	ibdev_err(&dev->ibdev, "admin command timed out, aq %d after: %ums\n",
46 		  aq->aqid, (u32)jiffies_to_msecs(jiffies - aq->stamp));
47 
48 	pos = (aq->q.prod - 1) & aq->q.mask;
49 	if (pos == aq->q.cons)
50 		goto out;
51 
52 	ibdev_warn(&dev->ibdev, "admin pos %u (last posted)\n", pos);
53 	print_hex_dump(KERN_WARNING, "cmd ", DUMP_PREFIX_OFFSET, 16, 1,
54 		       ionic_queue_at(&aq->q, pos),
55 		       BIT(aq->q.stride_log2), true);
56 
57 out:
58 	spin_unlock_irqrestore(&aq->lock, irqflags);
59 }
60 
61 static void ionic_admin_reset_dwork(struct ionic_ibdev *dev)
62 {
63 	if (atomic_read(&dev->admin_state) == IONIC_ADMIN_KILLED)
64 		return;
65 
66 	queue_delayed_work(ionic_evt_workq, &dev->admin_dwork,
67 			   IONIC_ADMIN_DELAY);
68 }
69 
70 static void ionic_admin_reset_wdog(struct ionic_aq *aq)
71 {
72 	if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED)
73 		return;
74 
75 	aq->stamp = jiffies;
76 	ionic_admin_reset_dwork(aq->dev);
77 }
78 
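/*
 * Return the CQE at the CQ producer index through *cqe, but only if its
 * color phase shows it is valid.
 */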
79 static bool ionic_admin_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
80 				 struct ionic_v1_cqe **cqe)
81 {
82 	struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
83 
84 	if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
85 		return false;
86 
87 	/* Prevent out-of-order reads of the CQE */
88 	dma_rmb();
89 	*cqe = qcqe;
90 
91 	return true;
92 }
93 
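/*
 * Poll the admin CQ for completions and post any pending work requests to
 * the adminq, ringing doorbells as needed. If the adminq has been killed,
 * complete all outstanding work requests locally instead. Caller must hold
 * aq->lock.
 */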
94 static void ionic_admin_poll_locked(struct ionic_aq *aq)
95 {
96 	struct ionic_cq *cq = &aq->vcq->cq[0];
97 	struct ionic_admin_wr *wr, *wr_next;
98 	struct ionic_ibdev *dev = aq->dev;
99 	u32 wr_strides, avlbl_strides;
100 	struct ionic_v1_cqe *cqe;
101 	u32 qtf, qid;
102 	u16 old_prod;
103 	u8 type;
104 
105 	lockdep_assert_held(&aq->lock);
106 
107 	if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED) {
108 		list_for_each_entry_safe(wr, wr_next, &aq->wr_prod, aq_ent) {
109 			INIT_LIST_HEAD(&wr->aq_ent);
110 			aq->q_wr[wr->status].wr = NULL;
111 			wr->status = atomic_read(&aq->admin_state);
112 			complete_all(&wr->work);
113 		}
114 		INIT_LIST_HEAD(&aq->wr_prod);
115 
116 		list_for_each_entry_safe(wr, wr_next, &aq->wr_post, aq_ent) {
117 			INIT_LIST_HEAD(&wr->aq_ent);
118 			wr->status = atomic_read(&aq->admin_state);
119 			complete_all(&wr->work);
120 		}
121 		INIT_LIST_HEAD(&aq->wr_post);
122 
123 		return;
124 	}
125 
126 	old_prod = cq->q.prod;
127 
128 	while (ionic_admin_next_cqe(dev, cq, &cqe)) {
129 		qtf = ionic_v1_cqe_qtf(cqe);
130 		qid = ionic_v1_cqe_qtf_qid(qtf);
131 		type = ionic_v1_cqe_qtf_type(qtf);
132 
133 		if (unlikely(type != IONIC_V1_CQE_TYPE_ADMIN)) {
134 			ibdev_warn_ratelimited(&dev->ibdev,
135 					       "bad cqe type %u\n", type);
136 			goto cq_next;
137 		}
138 
139 		if (unlikely(qid != aq->aqid)) {
140 			ibdev_warn_ratelimited(&dev->ibdev,
141 					       "bad cqe qid %u\n", qid);
142 			goto cq_next;
143 		}
144 
145 		if (unlikely(be16_to_cpu(cqe->admin.cmd_idx) != aq->q.cons)) {
146 			ibdev_warn_ratelimited(&dev->ibdev,
147 					       "bad idx %u cons %u qid %u\n",
148 					       be16_to_cpu(cqe->admin.cmd_idx),
149 					       aq->q.cons, qid);
150 			goto cq_next;
151 		}
152 
153 		if (unlikely(ionic_queue_empty(&aq->q))) {
154 			ibdev_warn_ratelimited(&dev->ibdev,
155 					       "bad cqe for empty adminq\n");
156 			goto cq_next;
157 		}
158 
159 		wr = aq->q_wr[aq->q.cons].wr;
160 		if (wr) {
161 			aq->q_wr[aq->q.cons].wr = NULL;
162 			list_del_init(&wr->aq_ent);
163 
164 			wr->cqe = *cqe;
165 			wr->status = atomic_read(&aq->admin_state);
166 			complete_all(&wr->work);
167 		}
168 
169 		ionic_queue_consume_entries(&aq->q,
170 					    aq->q_wr[aq->q.cons].wqe_strides);
171 
172 cq_next:
173 		ionic_queue_produce(&cq->q);
174 		cq->color = ionic_color_wrap(cq->q.prod, cq->color);
175 	}
176 
177 	if (old_prod != cq->q.prod) {
178 		ionic_admin_reset_wdog(aq);
179 		cq->q.cons = cq->q.prod;
180 		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
181 				 ionic_queue_dbell_val(&cq->q));
182 		queue_work(ionic_evt_workq, &aq->work);
183 	} else if (!aq->armed) {
184 		aq->armed = true;
185 		cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
186 		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
187 				 cq->q.dbell | IONIC_CQ_RING_ARM |
188 				 cq->arm_any_prod);
189 		queue_work(ionic_evt_workq, &aq->work);
190 	}
191 
192 	if (atomic_read(&aq->admin_state) != IONIC_ADMIN_ACTIVE)
193 		return;
194 
195 	old_prod = aq->q.prod;
196 
197 	if (ionic_queue_empty(&aq->q) && !list_empty(&aq->wr_post))
198 		ionic_admin_reset_wdog(aq);
199 
200 	if (list_empty(&aq->wr_post))
201 		return;
202 
203 	do {
204 		u8 *src;
205 		int i, src_len;
206 		size_t stride_len;
207 
208 		wr = list_first_entry(&aq->wr_post, struct ionic_admin_wr,
209 				      aq_ent);
210 		wr_strides = (le16_to_cpu(wr->wqe.len) + ADMIN_WQE_HDR_LEN +
211 			     (ADMIN_WQE_STRIDE - 1)) >> aq->q.stride_log2;
212 		avlbl_strides = ionic_queue_length_remaining(&aq->q);
213 
214 		if (wr_strides > avlbl_strides)
215 			break;
216 
217 		list_move(&wr->aq_ent, &aq->wr_prod);
218 		wr->status = aq->q.prod;
219 		aq->q_wr[aq->q.prod].wr = wr;
220 		aq->q_wr[aq->q.prod].wqe_strides = wr_strides;
221 
222 		src_len = le16_to_cpu(wr->wqe.len);
223 		src = (u8 *)&wr->wqe.cmd;
224 
225 		/* First stride */
226 		memcpy(ionic_queue_at_prod(&aq->q), &wr->wqe,
227 		       ADMIN_WQE_HDR_LEN);
228 		stride_len = ADMIN_WQE_STRIDE - ADMIN_WQE_HDR_LEN;
229 		if (stride_len > src_len)
230 			stride_len = src_len;
231 		memcpy(ionic_queue_at_prod(&aq->q) + ADMIN_WQE_HDR_LEN,
232 		       src, stride_len);
233 		ibdev_dbg(&dev->ibdev, "post admin prod %u (%u strides)\n",
234 			  aq->q.prod, wr_strides);
235 		print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
236 				     ionic_queue_at_prod(&aq->q),
237 				     BIT(aq->q.stride_log2), true);
238 		ionic_queue_produce(&aq->q);
239 
240 		/* Remaining strides */
241 		for (i = stride_len; i < src_len; i += stride_len) {
242 			stride_len = ADMIN_WQE_STRIDE;
243 
244 			if (i + stride_len > src_len)
245 				stride_len = src_len - i;
246 
247 			memcpy(ionic_queue_at_prod(&aq->q), src + i,
248 			       stride_len);
249 			print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
250 					     ionic_queue_at_prod(&aq->q),
251 					     BIT(aq->q.stride_log2), true);
252 			ionic_queue_produce(&aq->q);
253 		}
254 	} while (!list_empty(&aq->wr_post));
255 
256 	if (old_prod != aq->q.prod)
257 		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.aq_qtype,
258 				 ionic_queue_dbell_val(&aq->q));
259 }
260 
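/*
 * Watchdog delayed work: poll each adminq to recover from a missed event,
 * and treat a command as timed out if no progress is made before
 * IONIC_ADMIN_TIMEOUT expires.
 */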
261 static void ionic_admin_dwork(struct work_struct *ws)
262 {
263 	struct ionic_ibdev *dev =
264 		container_of(ws, struct ionic_ibdev, admin_dwork.work);
265 	struct ionic_aq *aq, *bad_aq = NULL;
266 	bool do_reschedule = false;
267 	unsigned long irqflags;
268 	bool do_reset = false;
269 	u16 pos;
270 	int i;
271 
272 	for (i = 0; i < dev->lif_cfg.aq_count; i++) {
273 		aq = dev->aq_vec[i];
274 
275 		spin_lock_irqsave(&aq->lock, irqflags);
276 
277 		if (ionic_queue_empty(&aq->q))
278 			goto next_aq;
279 
280 		/* Reschedule if any queue has outstanding work */
281 		do_reschedule = true;
282 
283 		if (time_is_after_eq_jiffies(aq->stamp + IONIC_ADMIN_WARN))
284 			/* Warning threshold not met, nothing to do */
285 			goto next_aq;
286 
287 		/* See if polling now makes some progress */
288 		pos = aq->q.cons;
289 		ionic_admin_poll_locked(aq);
290 		if (pos != aq->q.cons) {
291 			ibdev_dbg(&dev->ibdev,
292 				  "missed event for acq %d\n", aq->cqid);
293 			goto next_aq;
294 		}
295 
296 		if (time_is_after_eq_jiffies(aq->stamp +
297 					     IONIC_ADMIN_TIMEOUT)) {
298 			/* Timeout threshold not met */
299 			ibdev_dbg(&dev->ibdev, "no progress after %ums\n",
300 				  (u32)jiffies_to_msecs(jiffies - aq->stamp));
301 			goto next_aq;
302 		}
303 
304 		/* Queue timed out */
305 		bad_aq = aq;
306 		do_reset = true;
307 next_aq:
308 		spin_unlock_irqrestore(&aq->lock, irqflags);
309 	}
310 
311 	if (do_reset)
312 		/* Reset RDMA lif on a timeout */
313 		ionic_admin_timedout(bad_aq);
314 	else if (do_reschedule)
315 		/* Try to poll again later */
316 		ionic_admin_reset_dwork(dev);
317 }
318 
319 static void ionic_admin_work(struct work_struct *ws)
320 {
321 	struct ionic_aq *aq = container_of(ws, struct ionic_aq, work);
322 	unsigned long irqflags;
323 
324 	spin_lock_irqsave(&aq->lock, irqflags);
325 	ionic_admin_poll_locked(aq);
326 	spin_unlock_irqrestore(&aq->lock, irqflags);
327 }
328 
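/*
 * Queue a work request on the given adminq and kick off polling if the
 * queue was idle.
 */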
329 static void ionic_admin_post_aq(struct ionic_aq *aq, struct ionic_admin_wr *wr)
330 {
331 	unsigned long irqflags;
332 	bool poll;
333 
334 	wr->status = IONIC_ADMIN_POSTED;
335 	wr->aq = aq;
336 
337 	spin_lock_irqsave(&aq->lock, irqflags);
338 	poll = list_empty(&aq->wr_post);
339 	list_add(&wr->aq_ent, &aq->wr_post);
340 	if (poll)
341 		ionic_admin_poll_locked(aq);
342 	spin_unlock_irqrestore(&aq->lock, irqflags);
343 }
344 
345 void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr)
346 {
347 	int aq_idx;
348 
349 	/* Use cpu id for the adminq selection */
350 	aq_idx = raw_smp_processor_id() % dev->lif_cfg.aq_count;
351 	ionic_admin_post_aq(dev->aq_vec[aq_idx], wr);
352 }
353 
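/*
 * Remove an uncompleted work request from the adminq tracking, whether it is
 * still pending or already posted to the hardware ring.
 */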
354 static void ionic_admin_cancel(struct ionic_admin_wr *wr)
355 {
356 	struct ionic_aq *aq = wr->aq;
357 	unsigned long irqflags;
358 
359 	spin_lock_irqsave(&aq->lock, irqflags);
360 
361 	if (!list_empty(&wr->aq_ent)) {
362 		list_del(&wr->aq_ent);
363 		if (wr->status != IONIC_ADMIN_POSTED)
364 			aq->q_wr[wr->status].wr = NULL;
365 	}
366 
367 	spin_unlock_irqrestore(&aq->lock, irqflags);
368 }
369 
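/*
 * Spin, polling the adminq between short delays, until the work request
 * completes or the retry budget is exhausted; on timeout, trigger an adminq
 * reset and return -ETIMEDOUT.
 */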
370 static int ionic_admin_busy_wait(struct ionic_admin_wr *wr)
371 {
372 	struct ionic_aq *aq = wr->aq;
373 	unsigned long irqflags;
374 	int try_i;
375 
376 	for (try_i = 0; try_i < IONIC_ADMIN_BUSY_RETRY_COUNT; ++try_i) {
377 		if (completion_done(&wr->work))
378 			return 0;
379 
380 		mdelay(IONIC_ADMIN_BUSY_RETRY_MS);
381 
382 		spin_lock_irqsave(&aq->lock, irqflags);
383 		ionic_admin_poll_locked(aq);
384 		spin_unlock_irqrestore(&aq->lock, irqflags);
385 	}
386 
387 	/*
388 	 * We timed out. Initiate an RDMA LIF reset and indicate the
389 	 * error to the caller.
390 	 */
391 	ionic_admin_timedout(aq);
392 	return -ETIMEDOUT;
393 }
394 
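/*
 * Wait for a work request previously queued with ionic_admin_post() to
 * complete, busy-waiting or sleeping (interruptibly or not) according to
 * flags, and translate the outcome into an errno.
 */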
395 int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
396 		     enum ionic_admin_flags flags)
397 {
398 	int rc, timo;
399 
400 	if (flags & IONIC_ADMIN_F_BUSYWAIT) {
401 		/* Spin */
402 		rc = ionic_admin_busy_wait(wr);
403 	} else if (flags & IONIC_ADMIN_F_INTERRUPT) {
404 		/*
405 		 * Interruptible sleep, 1s timeout
406 		 * This is used for commands which are safe for the caller
407 		 * to clean up without killing and resetting the adminq.
408 		 */
409 		timo = wait_for_completion_interruptible_timeout(&wr->work,
410 								 HZ);
411 		if (timo > 0)
412 			rc = 0;
413 		else if (timo == 0)
414 			rc = -ETIMEDOUT;
415 		else
416 			rc = timo;
417 	} else {
418 		/*
419 		 * Uninterruptible sleep
420 		 * This is used for commands which are NOT safe for the
421 		 * caller to clean up. Cleanup must be handled by the
422 		 * adminq kill and reset process so that host memory is
423 		 * not corrupted by the device.
424 		 */
425 		wait_for_completion(&wr->work);
426 		rc = 0;
427 	}
428 
429 	if (rc) {
430 		ibdev_warn(&dev->ibdev, "wait status %d\n", rc);
431 		ionic_admin_cancel(wr);
432 	} else if (wr->status == IONIC_ADMIN_KILLED) {
433 		ibdev_dbg(&dev->ibdev, "admin killed\n");
434 
435 		/* No error if admin already killed during teardown */
436 		rc = (flags & IONIC_ADMIN_F_TEARDOWN) ? 0 : -ENODEV;
437 	} else if (ionic_v1_cqe_error(&wr->cqe)) {
438 		ibdev_warn(&dev->ibdev, "opcode %u error %u\n",
439 			   wr->wqe.op,
440 			   be32_to_cpu(wr->cqe.status_length));
441 		rc = -EINVAL;
442 	}
443 	return rc;
444 }
445 
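/*
 * Issue a device command through the lif adminq and convert the completion
 * status into an errno.
 */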
446 static int ionic_rdma_devcmd(struct ionic_ibdev *dev,
447 			     struct ionic_admin_ctx *admin)
448 {
449 	int rc;
450 
451 	rc = ionic_adminq_post_wait(dev->lif_cfg.lif, admin);
452 	if (rc)
453 		return rc;
454 
455 	return ionic_error_to_errno(admin->comp.comp.status);
456 }
457 
458 int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev)
459 {
460 	struct ionic_admin_ctx admin = {
461 		.work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
462 		.cmd.rdma_reset = {
463 			.opcode = IONIC_CMD_RDMA_RESET_LIF,
464 			.lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
465 		},
466 	};
467 
468 	return ionic_rdma_devcmd(dev, &admin);
469 }
470 
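/*
 * Issue the device command that creates an RDMA queue (EQ, CQ, or adminq)
 * from the given ionic_queue parameters.
 */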
471 static int ionic_rdma_queue_devcmd(struct ionic_ibdev *dev,
472 				   struct ionic_queue *q,
473 				   u32 qid, u32 cid, u16 opcode)
474 {
475 	struct ionic_admin_ctx admin = {
476 		.work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
477 		.cmd.rdma_queue = {
478 			.opcode = opcode,
479 			.lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
480 			.qid_ver = cpu_to_le32(qid),
481 			.cid = cpu_to_le32(cid),
482 			.dbid = cpu_to_le16(dev->lif_cfg.dbid),
483 			.depth_log2 = q->depth_log2,
484 			.stride_log2 = q->stride_log2,
485 			.dma_addr = cpu_to_le64(q->dma),
486 		},
487 	};
488 
489 	return ionic_rdma_devcmd(dev, &admin);
490 }
491 
492 static void ionic_rdma_admincq_comp(struct ib_cq *ibcq, void *cq_context)
493 {
494 	struct ionic_aq *aq = cq_context;
495 	unsigned long irqflags;
496 
497 	spin_lock_irqsave(&aq->lock, irqflags);
498 	aq->armed = false;
499 	if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
500 		queue_work(ionic_evt_workq, &aq->work);
501 	spin_unlock_irqrestore(&aq->lock, irqflags);
502 }
503 
504 static void ionic_rdma_admincq_event(struct ib_event *event, void *cq_context)
505 {
506 	struct ionic_aq *aq = cq_context;
507 
508 	ibdev_err(&aq->dev->ibdev, "admincq event %d\n", event->event);
509 }
510 
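/*
 * Create the CQ used for admin completions: set up an ionic_vcq with the
 * common CQ code, then register the CQ with the device.
 */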
511 static struct ionic_vcq *ionic_create_rdma_admincq(struct ionic_ibdev *dev,
512 						   int comp_vector)
513 {
514 	struct ib_cq_init_attr attr = {
515 		.cqe = IONIC_AQ_DEPTH,
516 		.comp_vector = comp_vector,
517 	};
518 	struct ionic_tbl_buf buf = {};
519 	struct ionic_vcq *vcq;
520 	struct ionic_cq *cq;
521 	int rc;
522 
523 	vcq = kzalloc(sizeof(*vcq), GFP_KERNEL);
524 	if (!vcq)
525 		return ERR_PTR(-ENOMEM);
526 
527 	vcq->ibcq.device = &dev->ibdev;
528 	vcq->ibcq.comp_handler = ionic_rdma_admincq_comp;
529 	vcq->ibcq.event_handler = ionic_rdma_admincq_event;
530 	atomic_set(&vcq->ibcq.usecnt, 0);
531 
532 	vcq->udma_mask = 1;
533 	cq = &vcq->cq[0];
534 
535 	rc = ionic_create_cq_common(vcq, &buf, &attr, NULL, NULL,
536 				    NULL, NULL, 0);
537 	if (rc)
538 		goto err_init;
539 
540 	rc = ionic_rdma_queue_devcmd(dev, &cq->q, cq->cqid, cq->eqid,
541 				     IONIC_CMD_RDMA_CREATE_CQ);
542 	if (rc)
543 		goto err_cmd;
544 
545 	return vcq;
546 
547 err_cmd:
548 	ionic_destroy_cq_common(dev, cq);
549 err_init:
550 	kfree(vcq);
551 
552 	return ERR_PTR(rc);
553 }
554 
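/*
 * Allocate and initialize an adminq in the killed state; it is marked active
 * only after it has been registered with the device.
 */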
555 static struct ionic_aq *__ionic_create_rdma_adminq(struct ionic_ibdev *dev,
556 						   u32 aqid, u32 cqid)
557 {
558 	struct ionic_aq *aq;
559 	int rc;
560 
561 	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
562 	if (!aq)
563 		return ERR_PTR(-ENOMEM);
564 
565 	atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
566 	aq->dev = dev;
567 	aq->aqid = aqid;
568 	aq->cqid = cqid;
569 	spin_lock_init(&aq->lock);
570 
571 	rc = ionic_queue_init(&aq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
572 			      ADMIN_WQE_STRIDE);
573 	if (rc)
574 		goto err_q;
575 
576 	ionic_queue_dbell_init(&aq->q, aq->aqid);
577 
578 	aq->q_wr = kcalloc((u32)aq->q.mask + 1, sizeof(*aq->q_wr), GFP_KERNEL);
579 	if (!aq->q_wr) {
580 		rc = -ENOMEM;
581 		goto err_wr;
582 	}
583 
584 	INIT_LIST_HEAD(&aq->wr_prod);
585 	INIT_LIST_HEAD(&aq->wr_post);
586 
587 	INIT_WORK(&aq->work, ionic_admin_work);
588 	aq->armed = false;
589 
590 	return aq;
591 
592 err_wr:
593 	ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
594 err_q:
595 	kfree(aq);
596 
597 	return ERR_PTR(rc);
598 }
599 
600 static void __ionic_destroy_rdma_adminq(struct ionic_ibdev *dev,
601 					struct ionic_aq *aq)
602 {
603 	kfree(aq->q_wr);
604 	ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
605 	kfree(aq);
606 }
607 
608 static struct ionic_aq *ionic_create_rdma_adminq(struct ionic_ibdev *dev,
609 						 u32 aqid, u32 cqid)
610 {
611 	struct ionic_aq *aq;
612 	int rc;
613 
614 	aq = __ionic_create_rdma_adminq(dev, aqid, cqid);
615 	if (IS_ERR(aq))
616 		return aq;
617 
618 	rc = ionic_rdma_queue_devcmd(dev, &aq->q, aq->aqid, aq->cqid,
619 				     IONIC_CMD_RDMA_CREATE_ADMINQ);
620 	if (rc)
621 		goto err_cmd;
622 
623 	return aq;
624 
625 err_cmd:
626 	__ionic_destroy_rdma_adminq(dev, aq);
627 
628 	return ERR_PTR(rc);
629 }
630 
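/*
 * With the adminqs killed, flush the send and receive queues of every QP and
 * notify every CQ so consumers see the flush.
 */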
631 static void ionic_flush_qs(struct ionic_ibdev *dev)
632 {
633 	struct ionic_qp *qp, *qp_tmp;
634 	struct ionic_cq *cq, *cq_tmp;
635 	LIST_HEAD(flush_list);
636 	unsigned long index;
637 
638 	WARN_ON(!irqs_disabled());
639 
640 	/* Flush qp send and recv */
641 	xa_lock(&dev->qp_tbl);
642 	xa_for_each(&dev->qp_tbl, index, qp) {
643 		kref_get(&qp->qp_kref);
644 		list_add_tail(&qp->ibkill_flush_ent, &flush_list);
645 	}
646 	xa_unlock(&dev->qp_tbl);
647 
648 	list_for_each_entry_safe(qp, qp_tmp, &flush_list, ibkill_flush_ent) {
649 		ionic_flush_qp(dev, qp);
650 		kref_put(&qp->qp_kref, ionic_qp_complete);
651 		list_del(&qp->ibkill_flush_ent);
652 	}
653 
654 	/* Notify completions */
655 	xa_lock(&dev->cq_tbl);
656 	xa_for_each(&dev->cq_tbl, index, cq) {
657 		kref_get(&cq->cq_kref);
658 		list_add_tail(&cq->ibkill_flush_ent, &flush_list);
659 	}
660 	xa_unlock(&dev->cq_tbl);
661 
662 	list_for_each_entry_safe(cq, cq_tmp, &flush_list, ibkill_flush_ent) {
663 		ionic_notify_flush_cq(cq);
664 		kref_put(&cq->cq_kref, ionic_cq_complete);
665 		list_del(&cq->ibkill_flush_ent);
666 	}
667 }
668 
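/*
 * Mark every adminq killed, flush outstanding admin work and QPs at most
 * once, and optionally report a fatal device event.
 */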
669 static void ionic_kill_ibdev(struct ionic_ibdev *dev, bool fatal_path)
670 {
671 	unsigned long irqflags;
672 	bool do_flush = false;
673 	int i;
674 
675 	/* Mark AQs for drain and flush the QPs while irq is disabled */
676 	local_irq_save(irqflags);
677 
678 	/* Mark each admin queue killed, flushing at most once */
679 	for (i = 0; i < dev->lif_cfg.aq_count; i++) {
680 		struct ionic_aq *aq = dev->aq_vec[i];
681 
682 		spin_lock(&aq->lock);
683 		if (atomic_read(&aq->admin_state) != IONIC_ADMIN_KILLED) {
684 			atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
685 			/* Flush incomplete admin commands */
686 			ionic_admin_poll_locked(aq);
687 			do_flush = true;
688 		}
689 		spin_unlock(&aq->lock);
690 	}
691 
692 	if (do_flush)
693 		ionic_flush_qs(dev);
694 
695 	local_irq_restore(irqflags);
696 
697 	/* Post a fatal event if requested */
698 	if (fatal_path) {
699 		struct ib_event ev;
700 
701 		ev.device = &dev->ibdev;
702 		ev.element.port_num = 1;
703 		ev.event = IB_EVENT_DEVICE_FATAL;
704 
705 		ib_dispatch_event(&ev);
706 	}
707 
708 	atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
709 }
710 
711 void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path)
712 {
713 	enum ionic_admin_state old_state;
714 	unsigned long irqflags = 0;
715 	int i, rc;
716 
717 	if (!dev->aq_vec)
718 		return;
719 
720 	/*
721 	 * Admin queues are transitioned from active to paused to killed state.
722 	 * When in paused state, no new commands are issued to the device,
723 	 * nor are any completed locally. After resetting the lif, it will be
724 	 * safe to resume the rdma admin queues in the killed state. Commands
725 	 * will not be issued to the device, but will complete locally with status
726 	 * IONIC_ADMIN_KILLED. Handling completion will ensure that creating or
727 	 * modifying resources fails, but destroying resources succeeds.
728 	 * If there was a failure resetting the lif using this strategy,
729 	 * then the state of the device is unknown.
730 	 */
731 	old_state = atomic_cmpxchg(&dev->admin_state, IONIC_ADMIN_ACTIVE,
732 				   IONIC_ADMIN_PAUSED);
733 	if (old_state != IONIC_ADMIN_ACTIVE)
734 		return;
735 
736 	/* Pause all the AQs */
737 	local_irq_save(irqflags);
738 	for (i = 0; i < dev->lif_cfg.aq_count; i++) {
739 		struct ionic_aq *aq = dev->aq_vec[i];
740 
741 		spin_lock(&aq->lock);
742 		/* pause rdma admin queues to reset lif */
743 		if (atomic_read(&aq->admin_state) == IONIC_ADMIN_ACTIVE)
744 			atomic_set(&aq->admin_state, IONIC_ADMIN_PAUSED);
745 		spin_unlock(&aq->lock);
746 	}
747 	local_irq_restore(irqflags);
748 
749 	rc = ionic_rdma_reset_devcmd(dev);
750 	if (unlikely(rc)) {
751 		ibdev_err(&dev->ibdev, "failed to reset rdma %d\n", rc);
752 		ionic_request_rdma_reset(dev->lif_cfg.lif);
753 	}
754 
755 	ionic_kill_ibdev(dev, fatal_path);
756 }
757 
758 static void ionic_reset_work(struct work_struct *ws)
759 {
760 	struct ionic_ibdev *dev =
761 		container_of(ws, struct ionic_ibdev, reset_work);
762 
763 	ionic_kill_rdma_admin(dev, true);
764 }
765 
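/* Copy out the next EQE only if its color phase shows it is valid. */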
766 static bool ionic_next_eqe(struct ionic_eq *eq, struct ionic_v1_eqe *eqe)
767 {
768 	struct ionic_v1_eqe *qeqe;
769 	bool color;
770 
771 	qeqe = ionic_queue_at_prod(&eq->q);
772 	color = ionic_v1_eqe_color(qeqe);
773 
774 	/* cons is color for eq */
775 	if (eq->q.cons != color)
776 		return false;
777 
778 	/* Prevent out-of-order reads of the EQE */
779 	dma_rmb();
780 
781 	ibdev_dbg(&eq->dev->ibdev, "poll eq prod %u\n", eq->q.prod);
782 	print_hex_dump_debug("eqe ", DUMP_PREFIX_OFFSET, 16, 1,
783 			     qeqe, BIT(eq->q.stride_log2), true);
784 	*eqe = *qeqe;
785 
786 	return true;
787 }
788 
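/*
 * Look up the CQ for an event from the EQ and dispatch it to the consumer's
 * completion or event handler.
 */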
789 static void ionic_cq_event(struct ionic_ibdev *dev, u32 cqid, u8 code)
790 {
791 	unsigned long irqflags;
792 	struct ib_event ibev;
793 	struct ionic_cq *cq;
794 
795 	xa_lock_irqsave(&dev->cq_tbl, irqflags);
796 	cq = xa_load(&dev->cq_tbl, cqid);
797 	if (cq)
798 		kref_get(&cq->cq_kref);
799 	xa_unlock_irqrestore(&dev->cq_tbl, irqflags);
800 
801 	if (!cq) {
802 		ibdev_dbg(&dev->ibdev,
803 			  "missing cqid %#x code %u\n", cqid, code);
804 		return;
805 	}
806 
807 	switch (code) {
808 	case IONIC_V1_EQE_CQ_NOTIFY:
809 		if (cq->vcq->ibcq.comp_handler)
810 			cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
811 						   cq->vcq->ibcq.cq_context);
812 		break;
813 
814 	case IONIC_V1_EQE_CQ_ERR:
815 		if (cq->vcq->ibcq.event_handler) {
816 			ibev.event = IB_EVENT_CQ_ERR;
817 			ibev.device = &dev->ibdev;
818 			ibev.element.cq = &cq->vcq->ibcq;
819 
820 			cq->vcq->ibcq.event_handler(&ibev,
821 						    cq->vcq->ibcq.cq_context);
822 		}
823 		break;
824 
825 	default:
826 		ibdev_dbg(&dev->ibdev,
827 			  "unrecognized cqid %#x code %u\n", cqid, code);
828 		break;
829 	}
830 
831 	kref_put(&cq->cq_kref, ionic_cq_complete);
832 }
833 
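/*
 * Look up the QP for an async event from the EQ, translate the event code
 * into an ib_event, and dispatch it to the QP's event handler.
 */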
834 static void ionic_qp_event(struct ionic_ibdev *dev, u32 qpid, u8 code)
835 {
836 	unsigned long irqflags;
837 	struct ib_event ibev;
838 	struct ionic_qp *qp;
839 
840 	xa_lock_irqsave(&dev->qp_tbl, irqflags);
841 	qp = xa_load(&dev->qp_tbl, qpid);
842 	if (qp)
843 		kref_get(&qp->qp_kref);
844 	xa_unlock_irqrestore(&dev->qp_tbl, irqflags);
845 
846 	if (!qp) {
847 		ibdev_dbg(&dev->ibdev,
848 			  "missing qpid %#x code %u\n", qpid, code);
849 		return;
850 	}
851 
852 	ibev.device = &dev->ibdev;
853 	ibev.element.qp = &qp->ibqp;
854 
855 	switch (code) {
856 	case IONIC_V1_EQE_SQ_DRAIN:
857 		ibev.event = IB_EVENT_SQ_DRAINED;
858 		break;
859 
860 	case IONIC_V1_EQE_QP_COMM_EST:
861 		ibev.event = IB_EVENT_COMM_EST;
862 		break;
863 
864 	case IONIC_V1_EQE_QP_LAST_WQE:
865 		ibev.event = IB_EVENT_QP_LAST_WQE_REACHED;
866 		break;
867 
868 	case IONIC_V1_EQE_QP_ERR:
869 		ibev.event = IB_EVENT_QP_FATAL;
870 		break;
871 
872 	case IONIC_V1_EQE_QP_ERR_REQUEST:
873 		ibev.event = IB_EVENT_QP_REQ_ERR;
874 		break;
875 
876 	case IONIC_V1_EQE_QP_ERR_ACCESS:
877 		ibev.event = IB_EVENT_QP_ACCESS_ERR;
878 		break;
879 
880 	default:
881 		ibdev_dbg(&dev->ibdev,
882 			  "unrecognized qpid %#x code %u\n", qpid, code);
883 		goto out;
884 	}
885 
886 	if (qp->ibqp.event_handler)
887 		qp->ibqp.event_handler(&ibev, qp->ibqp.qp_context);
888 
889 out:
890 	kref_put(&qp->qp_kref, ionic_qp_complete);
891 }
892 
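/*
 * Poll up to budget EQEs, dispatching CQ and QP events, and return the
 * number of EQEs polled.
 */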
893 static u16 ionic_poll_eq(struct ionic_eq *eq, u16 budget)
894 {
895 	struct ionic_ibdev *dev = eq->dev;
896 	struct ionic_v1_eqe eqe;
897 	u16 npolled = 0;
898 	u8 type, code;
899 	u32 evt, qid;
900 
901 	while (npolled < budget) {
902 		if (!ionic_next_eqe(eq, &eqe))
903 			break;
904 
905 		ionic_queue_produce(&eq->q);
906 
907 		/* cons is color for eq */
908 		eq->q.cons = ionic_color_wrap(eq->q.prod, eq->q.cons);
909 
910 		++npolled;
911 
912 		evt = ionic_v1_eqe_evt(&eqe);
913 		type = ionic_v1_eqe_evt_type(evt);
914 		code = ionic_v1_eqe_evt_code(evt);
915 		qid = ionic_v1_eqe_evt_qid(evt);
916 
917 		switch (type) {
918 		case IONIC_V1_EQE_TYPE_CQ:
919 			ionic_cq_event(dev, qid, code);
920 			break;
921 
922 		case IONIC_V1_EQE_TYPE_QP:
923 			ionic_qp_event(dev, qid, code);
924 			break;
925 
926 		default:
927 			ibdev_dbg(&dev->ibdev,
928 				  "unknown event %#x type %u\n", evt, type);
929 		}
930 	}
931 
932 	return npolled;
933 }
934 
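/* Workqueue continuation of EQ polling when a previous budget was exhausted. */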
935 static void ionic_poll_eq_work(struct work_struct *work)
936 {
937 	struct ionic_eq *eq = container_of(work, struct ionic_eq, work);
938 	u32 npolled;
939 
940 	if (unlikely(!eq->enable) || WARN_ON(eq->armed))
941 		return;
942 
943 	npolled = ionic_poll_eq(eq, IONIC_EQ_WORK_BUDGET);
944 	if (npolled == IONIC_EQ_WORK_BUDGET) {
945 		ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
946 				   npolled, 0);
947 		queue_work(ionic_evt_workq, &eq->work);
948 	} else {
949 		xchg(&eq->armed, 1);
950 		ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
951 				   0, IONIC_INTR_CRED_UNMASK);
952 	}
953 }
954 
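/*
 * Interrupt handler: poll the EQ; if the budget is exhausted, defer further
 * polling to the workqueue, otherwise re-arm and unmask the interrupt.
 */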
955 static irqreturn_t ionic_poll_eq_isr(int irq, void *eqptr)
956 {
957 	struct ionic_eq *eq = eqptr;
958 	int was_armed;
959 	u32 npolled;
960 
961 	was_armed = xchg(&eq->armed, 0);
962 
963 	if (unlikely(!eq->enable) || !was_armed)
964 		return IRQ_HANDLED;
965 
966 	npolled = ionic_poll_eq(eq, IONIC_EQ_ISR_BUDGET);
967 	if (npolled == IONIC_EQ_ISR_BUDGET) {
968 		ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
969 				   npolled, 0);
970 		queue_work(ionic_evt_workq, &eq->work);
971 	} else {
972 		xchg(&eq->armed, 1);
973 		ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
974 				   0, IONIC_INTR_CRED_UNMASK);
975 	}
976 
977 	return IRQ_HANDLED;
978 }
979 
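/*
 * Create an event queue: allocate the queue and an interrupt, request the
 * IRQ, register the EQ with the device, then unmask the interrupt.
 */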
980 static struct ionic_eq *ionic_create_eq(struct ionic_ibdev *dev, int eqid)
981 {
982 	struct ionic_intr_info intr_obj = { };
983 	struct ionic_eq *eq;
984 	int rc;
985 
986 	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
987 	if (!eq)
988 		return ERR_PTR(-ENOMEM);
989 
990 	eq->dev = dev;
991 
992 	rc = ionic_queue_init(&eq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
993 			      sizeof(struct ionic_v1_eqe));
994 	if (rc)
995 		goto err_q;
996 
997 	eq->eqid = eqid;
998 
999 	eq->armed = true;
1000 	eq->enable = false;
1001 	INIT_WORK(&eq->work, ionic_poll_eq_work);
1002 
1003 	rc = ionic_intr_alloc(dev->lif_cfg.lif, &intr_obj);
1004 	if (rc < 0)
1005 		goto err_intr;
1006 
1007 	eq->irq = intr_obj.vector;
1008 	eq->intr = intr_obj.index;
1009 
1010 	ionic_queue_dbell_init(&eq->q, eq->eqid);
1011 
1012 	/* cons is color for eq */
1013 	eq->q.cons = true;
1014 
1015 	snprintf(eq->name, sizeof(eq->name), "%s-%d-%d-eq",
1016 		 "ionr", dev->lif_cfg.lif_index, eq->eqid);
1017 
1018 	ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
1019 	ionic_intr_mask_assert(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
1020 	ionic_intr_coal_init(dev->lif_cfg.intr_ctrl, eq->intr, 0);
1021 	ionic_intr_clean(dev->lif_cfg.intr_ctrl, eq->intr);
1022 
1023 	eq->enable = true;
1024 
1025 	rc = request_irq(eq->irq, ionic_poll_eq_isr, 0, eq->name, eq);
1026 	if (rc)
1027 		goto err_irq;
1028 
1029 	rc = ionic_rdma_queue_devcmd(dev, &eq->q, eq->eqid, eq->intr,
1030 				     IONIC_CMD_RDMA_CREATE_EQ);
1031 	if (rc)
1032 		goto err_cmd;
1033 
1034 	ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_CLEAR);
1035 
1036 	return eq;
1037 
1038 err_cmd:
1039 	eq->enable = false;
1040 	free_irq(eq->irq, eq);
1041 	flush_work(&eq->work);
1042 err_irq:
1043 	ionic_intr_free(dev->lif_cfg.lif, eq->intr);
1044 err_intr:
1045 	ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
1046 err_q:
1047 	kfree(eq);
1048 
1049 	return ERR_PTR(rc);
1050 }
1051 
1052 static void ionic_destroy_eq(struct ionic_eq *eq)
1053 {
1054 	struct ionic_ibdev *dev = eq->dev;
1055 
1056 	eq->enable = false;
1057 	free_irq(eq->irq, eq);
1058 	flush_work(&eq->work);
1059 
1060 	ionic_intr_free(dev->lif_cfg.lif, eq->intr);
1061 	ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
1062 	kfree(eq);
1063 }
1064 
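/*
 * Create the RDMA event queues, admin completion queues, and adminqs.
 * Fewer queues than requested are tolerated as long as the minimum counts
 * are met.
 */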
1065 int ionic_create_rdma_admin(struct ionic_ibdev *dev)
1066 {
1067 	int eq_i = 0, aq_i = 0, rc = 0;
1068 	struct ionic_vcq *vcq;
1069 	struct ionic_aq *aq;
1070 	struct ionic_eq *eq;
1071 
1072 	dev->eq_vec = NULL;
1073 	dev->aq_vec = NULL;
1074 
1075 	INIT_WORK(&dev->reset_work, ionic_reset_work);
1076 	INIT_DELAYED_WORK(&dev->admin_dwork, ionic_admin_dwork);
1077 	atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
1078 
1079 	if (dev->lif_cfg.aq_count > IONIC_AQ_COUNT) {
1080 		ibdev_dbg(&dev->ibdev, "limiting adminq count to %d\n",
1081 			  IONIC_AQ_COUNT);
1082 		dev->lif_cfg.aq_count = IONIC_AQ_COUNT;
1083 	}
1084 
1085 	if (dev->lif_cfg.eq_count > IONIC_EQ_COUNT) {
1086 		dev_dbg(&dev->ibdev.dev, "limiting eventq count to %d\n",
1087 			IONIC_EQ_COUNT);
1088 		dev->lif_cfg.eq_count = IONIC_EQ_COUNT;
1089 	}
1090 
1091 	/* need at least IONIC_EQ_COUNT_MIN eqs and IONIC_AQ_COUNT_MIN aqs */
1092 	if (dev->lif_cfg.eq_count < IONIC_EQ_COUNT_MIN ||
1093 	    dev->lif_cfg.aq_count < IONIC_AQ_COUNT_MIN) {
1094 		rc = -EINVAL;
1095 		goto out;
1096 	}
1097 
1098 	dev->eq_vec = kmalloc_array(dev->lif_cfg.eq_count, sizeof(*dev->eq_vec),
1099 				    GFP_KERNEL);
1100 	if (!dev->eq_vec) {
1101 		rc = -ENOMEM;
1102 		goto out;
1103 	}
1104 
1105 	for (eq_i = 0; eq_i < dev->lif_cfg.eq_count; ++eq_i) {
1106 		eq = ionic_create_eq(dev, eq_i + dev->lif_cfg.eq_base);
1107 		if (IS_ERR(eq)) {
1108 			rc = PTR_ERR(eq);
1109 
1110 			if (eq_i < IONIC_EQ_COUNT_MIN) {
1111 				ibdev_err(&dev->ibdev,
1112 					  "fail create eq %pe\n", eq);
1113 				goto out;
1114 			}
1115 
1116 			/* ok, just fewer eq than device supports */
1117 			ibdev_dbg(&dev->ibdev, "eq count %d want %d rc %pe\n",
1118 				  eq_i, dev->lif_cfg.eq_count, eq);
1119 
1120 			rc = 0;
1121 			break;
1122 		}
1123 
1124 		dev->eq_vec[eq_i] = eq;
1125 	}
1126 
1127 	dev->lif_cfg.eq_count = eq_i;
1128 
1129 	dev->aq_vec = kmalloc_array(dev->lif_cfg.aq_count, sizeof(*dev->aq_vec),
1130 				    GFP_KERNEL);
1131 	if (!dev->aq_vec) {
1132 		rc = -ENOMEM;
1133 		goto out;
1134 	}
1135 
1136 	/* Create one CQ per AQ */
1137 	for (aq_i = 0; aq_i < dev->lif_cfg.aq_count; ++aq_i) {
1138 		vcq = ionic_create_rdma_admincq(dev, aq_i % eq_i);
1139 		if (IS_ERR(vcq)) {
1140 			rc = PTR_ERR(vcq);
1141 
1142 			if (!aq_i) {
1143 				ibdev_err(&dev->ibdev,
1144 					  "failed to create acq %pe\n", vcq);
1145 				goto out;
1146 			}
1147 
1148 			/* ok, just fewer adminq than device supports */
1149 			ibdev_dbg(&dev->ibdev, "acq count %d want %d rc %pe\n",
1150 				  aq_i, dev->lif_cfg.aq_count, vcq);
1151 			break;
1152 		}
1153 
1154 		aq = ionic_create_rdma_adminq(dev, aq_i + dev->lif_cfg.aq_base,
1155 					      vcq->cq[0].cqid);
1156 		if (IS_ERR(aq)) {
1157 			/* Clean up the dangling CQ */
1158 			ionic_destroy_cq_common(dev, &vcq->cq[0]);
1159 			kfree(vcq);
1160 
1161 			rc = PTR_ERR(aq);
1162 
1163 			if (!aq_i) {
1164 				ibdev_err(&dev->ibdev,
1165 					  "failed to create aq %pe\n", aq);
1166 				goto out;
1167 			}
1168 
1169 			/* ok, just fewer adminq than device supports */
1170 			ibdev_dbg(&dev->ibdev, "aq count %d want %d rc %pe\n",
1171 				  aq_i, dev->lif_cfg.aq_count, aq);
1172 			break;
1173 		}
1174 
1175 		vcq->ibcq.cq_context = aq;
1176 		aq->vcq = vcq;
1177 
1178 		atomic_set(&aq->admin_state, IONIC_ADMIN_ACTIVE);
1179 		dev->aq_vec[aq_i] = aq;
1180 	}
1181 
1182 	atomic_set(&dev->admin_state, IONIC_ADMIN_ACTIVE);
1183 out:
1184 	dev->lif_cfg.eq_count = eq_i;
1185 	dev->lif_cfg.aq_count = aq_i;
1186 
1187 	return rc;
1188 }
1189 
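/*
 * Tear down the adminqs, admin CQs, and EQs created by
 * ionic_create_rdma_admin(), cancelling the watchdog and reset work first.
 */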
1190 void ionic_destroy_rdma_admin(struct ionic_ibdev *dev)
1191 {
1192 	struct ionic_vcq *vcq;
1193 	struct ionic_aq *aq;
1194 	struct ionic_eq *eq;
1195 
1196 	/*
1197 	 * Killing the admin queues before destroying them ensures that all
1198 	 * admin commands and completions are flushed. admin_state =
1199 	 * IONIC_ADMIN_KILLED prevents further work from being queued.
1200 	 */
1201 	cancel_delayed_work_sync(&dev->admin_dwork);
1202 	cancel_work_sync(&dev->reset_work);
1203 
1204 	if (dev->aq_vec) {
1205 		while (dev->lif_cfg.aq_count > 0) {
1206 			aq = dev->aq_vec[--dev->lif_cfg.aq_count];
1207 			vcq = aq->vcq;
1208 
1209 			cancel_work_sync(&aq->work);
1210 
1211 			__ionic_destroy_rdma_adminq(dev, aq);
1212 			if (vcq) {
1213 				ionic_destroy_cq_common(dev, &vcq->cq[0]);
1214 				kfree(vcq);
1215 			}
1216 		}
1217 
1218 		kfree(dev->aq_vec);
1219 	}
1220 
1221 	if (dev->eq_vec) {
1222 		while (dev->lif_cfg.eq_count > 0) {
1223 			eq = dev->eq_vec[--dev->lif_cfg.eq_count];
1224 			ionic_destroy_eq(eq);
1225 		}
1226 
1227 		kfree(dev->eq_vec);
1228 	}
1229 }
1230