xref: /linux/drivers/infiniband/hw/ionic/ionic_admin.c (revision f3bdbd42702c6b10ebe627828c76ef51c68e4355)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/printk.h>

#include "ionic_fw.h"
#include "ionic_ibdev.h"

#define IONIC_EQ_COUNT_MIN	4
#define IONIC_AQ_COUNT_MIN	1

/* not a valid queue position or negative error status */
#define IONIC_ADMIN_POSTED	0x10000

/* CPU can be held with IRQs disabled for COUNT * MS (for create/destroy_ah) */
#define IONIC_ADMIN_BUSY_RETRY_COUNT	2000
#define IONIC_ADMIN_BUSY_RETRY_MS	1

/* the admin queue is considered failed if a command takes longer than this */
#define IONIC_ADMIN_TIMEOUT	(HZ * 2)
#define IONIC_ADMIN_WARN	(HZ / 8)

/* poll the admin cq at this interval to tolerate and report a missed event */
#define IONIC_ADMIN_DELAY	(HZ / 8)

/* work queue for polling the event queue and admin cq */
struct workqueue_struct *ionic_evt_workq;

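/*
 * Handle an admin command that has exceeded IONIC_ADMIN_TIMEOUT: schedule a
 * reset of the RDMA LIF (unless the queue is already killed) and dump the
 * most recently posted WQE for debugging.
 */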
static void ionic_admin_timedout(struct ionic_aq *aq)
{
	struct ionic_ibdev *dev = aq->dev;
	unsigned long irqflags;
	u16 pos;

	spin_lock_irqsave(&aq->lock, irqflags);
	if (ionic_queue_empty(&aq->q))
		goto out;

	/* Reset ALL adminq if any one times out */
	if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
		queue_work(ionic_evt_workq, &dev->reset_work);

	ibdev_err(&dev->ibdev, "admin command timed out, aq %d after: %ums\n",
		  aq->aqid, (u32)jiffies_to_msecs(jiffies - aq->stamp));

	pos = (aq->q.prod - 1) & aq->q.mask;
	if (pos == aq->q.cons)
		goto out;

	ibdev_warn(&dev->ibdev, "admin pos %u (last posted)\n", pos);
	print_hex_dump(KERN_WARNING, "cmd ", DUMP_PREFIX_OFFSET, 16, 1,
		       ionic_queue_at(&aq->q, pos),
		       BIT(aq->q.stride_log2), true);

out:
	spin_unlock_irqrestore(&aq->lock, irqflags);
}

static void ionic_admin_reset_dwork(struct ionic_ibdev *dev)
{
	if (atomic_read(&dev->admin_state) == IONIC_ADMIN_KILLED)
		return;

	queue_delayed_work(ionic_evt_workq, &dev->admin_dwork,
			   IONIC_ADMIN_DELAY);
}

static void ionic_admin_reset_wdog(struct ionic_aq *aq)
{
	if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED)
		return;

	aq->stamp = jiffies;
	ionic_admin_reset_dwork(aq->dev);
}

static bool ionic_admin_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
				 struct ionic_v1_cqe **cqe)
{
	struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);

	if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
		return false;

	/* Prevent out-of-order reads of the CQE */
	dma_rmb();
	*cqe = qcqe;

	return true;
}

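/*
 * Core admin queue processing, called with aq->lock held. If the queue has
 * been killed, complete all outstanding work requests locally with the
 * killed status. Otherwise consume admin completions from the CQ, complete
 * the matching work requests, ring or re-arm the CQ doorbell, and post as
 * many pending work requests as fit in the admin queue.
 */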
static void ionic_admin_poll_locked(struct ionic_aq *aq)
{
	struct ionic_cq *cq = &aq->vcq->cq[0];
	struct ionic_admin_wr *wr, *wr_next;
	struct ionic_ibdev *dev = aq->dev;
	u32 wr_strides, avlbl_strides;
	struct ionic_v1_cqe *cqe;
	u32 qtf, qid;
	u16 old_prod;
	u8 type;

	lockdep_assert_held(&aq->lock);

	if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED) {
		list_for_each_entry_safe(wr, wr_next, &aq->wr_prod, aq_ent) {
			INIT_LIST_HEAD(&wr->aq_ent);
			aq->q_wr[wr->status].wr = NULL;
			wr->status = atomic_read(&aq->admin_state);
			complete_all(&wr->work);
		}
		INIT_LIST_HEAD(&aq->wr_prod);

		list_for_each_entry_safe(wr, wr_next, &aq->wr_post, aq_ent) {
			INIT_LIST_HEAD(&wr->aq_ent);
			wr->status = atomic_read(&aq->admin_state);
			complete_all(&wr->work);
		}
		INIT_LIST_HEAD(&aq->wr_post);

		return;
	}

	old_prod = cq->q.prod;

	while (ionic_admin_next_cqe(dev, cq, &cqe)) {
		qtf = ionic_v1_cqe_qtf(cqe);
		qid = ionic_v1_cqe_qtf_qid(qtf);
		type = ionic_v1_cqe_qtf_type(qtf);

		if (unlikely(type != IONIC_V1_CQE_TYPE_ADMIN)) {
			ibdev_warn_ratelimited(&dev->ibdev,
					       "bad cqe type %u\n", type);
			goto cq_next;
		}

		if (unlikely(qid != aq->aqid)) {
			ibdev_warn_ratelimited(&dev->ibdev,
					       "bad cqe qid %u\n", qid);
			goto cq_next;
		}

		if (unlikely(be16_to_cpu(cqe->admin.cmd_idx) != aq->q.cons)) {
			ibdev_warn_ratelimited(&dev->ibdev,
					       "bad idx %u cons %u qid %u\n",
					       be16_to_cpu(cqe->admin.cmd_idx),
					       aq->q.cons, qid);
			goto cq_next;
		}

		if (unlikely(ionic_queue_empty(&aq->q))) {
			ibdev_warn_ratelimited(&dev->ibdev,
					       "bad cqe for empty adminq\n");
			goto cq_next;
		}

		wr = aq->q_wr[aq->q.cons].wr;
		if (wr) {
			aq->q_wr[aq->q.cons].wr = NULL;
			list_del_init(&wr->aq_ent);

			wr->cqe = *cqe;
			wr->status = atomic_read(&aq->admin_state);
			complete_all(&wr->work);
		}

		ionic_queue_consume_entries(&aq->q,
					    aq->q_wr[aq->q.cons].wqe_strides);

cq_next:
		ionic_queue_produce(&cq->q);
		cq->color = ionic_color_wrap(cq->q.prod, cq->color);
	}

	if (old_prod != cq->q.prod) {
		ionic_admin_reset_wdog(aq);
		cq->q.cons = cq->q.prod;
		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
				 ionic_queue_dbell_val(&cq->q));
		queue_work(ionic_evt_workq, &aq->work);
	} else if (!aq->armed) {
		aq->armed = true;
		cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
				 cq->q.dbell | IONIC_CQ_RING_ARM |
				 cq->arm_any_prod);
		queue_work(ionic_evt_workq, &aq->work);
	}

	if (atomic_read(&aq->admin_state) != IONIC_ADMIN_ACTIVE)
		return;

	old_prod = aq->q.prod;

	if (ionic_queue_empty(&aq->q) && !list_empty(&aq->wr_post))
		ionic_admin_reset_wdog(aq);

	if (list_empty(&aq->wr_post))
		return;

	do {
		u8 *src;
		int i, src_len;
		size_t stride_len;

		wr = list_first_entry(&aq->wr_post, struct ionic_admin_wr,
				      aq_ent);
		wr_strides = (le16_to_cpu(wr->wqe.len) + ADMIN_WQE_HDR_LEN +
			     (ADMIN_WQE_STRIDE - 1)) >> aq->q.stride_log2;
		avlbl_strides = ionic_queue_length_remaining(&aq->q);

		if (wr_strides > avlbl_strides)
			break;

		list_move(&wr->aq_ent, &aq->wr_prod);
		wr->status = aq->q.prod;
		aq->q_wr[aq->q.prod].wr = wr;
		aq->q_wr[aq->q.prod].wqe_strides = wr_strides;

		src_len = le16_to_cpu(wr->wqe.len);
		src = (uint8_t *)&wr->wqe.cmd;

		/* First stride */
		memcpy(ionic_queue_at_prod(&aq->q), &wr->wqe,
		       ADMIN_WQE_HDR_LEN);
		stride_len = ADMIN_WQE_STRIDE - ADMIN_WQE_HDR_LEN;
		if (stride_len > src_len)
			stride_len = src_len;
		memcpy(ionic_queue_at_prod(&aq->q) + ADMIN_WQE_HDR_LEN,
		       src, stride_len);
		ibdev_dbg(&dev->ibdev, "post admin prod %u (%u strides)\n",
			  aq->q.prod, wr_strides);
		print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
				     ionic_queue_at_prod(&aq->q),
				     BIT(aq->q.stride_log2), true);
		ionic_queue_produce(&aq->q);

		/* Remaining strides */
		for (i = stride_len; i < src_len; i += stride_len) {
			stride_len = ADMIN_WQE_STRIDE;

			if (i + stride_len > src_len)
				stride_len = src_len - i;

			memcpy(ionic_queue_at_prod(&aq->q), src + i,
			       stride_len);
			print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
					     ionic_queue_at_prod(&aq->q),
					     BIT(aq->q.stride_log2), true);
			ionic_queue_produce(&aq->q);
		}
	} while (!list_empty(&aq->wr_post));

	if (old_prod != aq->q.prod)
		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.aq_qtype,
				 ionic_queue_dbell_val(&aq->q));
}

static void ionic_admin_dwork(struct work_struct *ws)
{
	struct ionic_ibdev *dev =
		container_of(ws, struct ionic_ibdev, admin_dwork.work);
	struct ionic_aq *aq, *bad_aq = NULL;
	bool do_reschedule = false;
	unsigned long irqflags;
	bool do_reset = false;
	u16 pos;
	int i;

	for (i = 0; i < dev->lif_cfg.aq_count; i++) {
		aq = dev->aq_vec[i];

		spin_lock_irqsave(&aq->lock, irqflags);

		if (ionic_queue_empty(&aq->q))
			goto next_aq;

		/* Reschedule if any queue has outstanding work */
		do_reschedule = true;

		if (time_is_after_eq_jiffies(aq->stamp + IONIC_ADMIN_WARN))
			/* Warning threshold not met, nothing to do */
			goto next_aq;

		/* See if polling now makes some progress */
		pos = aq->q.cons;
		ionic_admin_poll_locked(aq);
		if (pos != aq->q.cons) {
			ibdev_dbg(&dev->ibdev,
				  "missed event for acq %d\n", aq->cqid);
			goto next_aq;
		}

		if (time_is_after_eq_jiffies(aq->stamp +
					     IONIC_ADMIN_TIMEOUT)) {
			/* Timeout threshold not met */
			ibdev_dbg(&dev->ibdev, "no progress after %ums\n",
				  (u32)jiffies_to_msecs(jiffies - aq->stamp));
			goto next_aq;
		}

		/* Queue timed out */
		bad_aq = aq;
		do_reset = true;
next_aq:
		spin_unlock_irqrestore(&aq->lock, irqflags);
	}

	if (do_reset)
		/* Reset RDMA lif on a timeout */
		ionic_admin_timedout(bad_aq);
	else if (do_reschedule)
		/* Try to poll again later */
		ionic_admin_reset_dwork(dev);
}

static void ionic_admin_work(struct work_struct *ws)
{
	struct ionic_aq *aq = container_of(ws, struct ionic_aq, work);
	unsigned long irqflags;

	spin_lock_irqsave(&aq->lock, irqflags);
	ionic_admin_poll_locked(aq);
	spin_unlock_irqrestore(&aq->lock, irqflags);
}

static void ionic_admin_post_aq(struct ionic_aq *aq, struct ionic_admin_wr *wr)
{
	unsigned long irqflags;
	bool poll;

	wr->status = IONIC_ADMIN_POSTED;
	wr->aq = aq;

	spin_lock_irqsave(&aq->lock, irqflags);
	poll = list_empty(&aq->wr_post);
	list_add(&wr->aq_ent, &aq->wr_post);
	if (poll)
		ionic_admin_poll_locked(aq);
	spin_unlock_irqrestore(&aq->lock, irqflags);
}

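/* Post an admin work request, selecting an adminq by the current CPU id */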
void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr)
{
	int aq_idx;

	/* Use the CPU id for adminq selection */
	aq_idx = raw_smp_processor_id() % dev->lif_cfg.aq_count;
	ionic_admin_post_aq(dev->aq_vec[aq_idx], wr);
}

static void ionic_admin_cancel(struct ionic_admin_wr *wr)
{
	struct ionic_aq *aq = wr->aq;
	unsigned long irqflags;

	spin_lock_irqsave(&aq->lock, irqflags);

	if (!list_empty(&wr->aq_ent)) {
		list_del(&wr->aq_ent);
		if (wr->status != IONIC_ADMIN_POSTED)
			aq->q_wr[wr->status].wr = NULL;
	}

	spin_unlock_irqrestore(&aq->lock, irqflags);
}

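/*
 * Spin for a posted admin command to complete, polling the admin CQ between
 * short delays. After IONIC_ADMIN_BUSY_RETRY_COUNT retries, treat it as an
 * admin timeout and return -ETIMEDOUT.
 */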
static int ionic_admin_busy_wait(struct ionic_admin_wr *wr)
{
	struct ionic_aq *aq = wr->aq;
	unsigned long irqflags;
	int try_i;

	for (try_i = 0; try_i < IONIC_ADMIN_BUSY_RETRY_COUNT; ++try_i) {
		if (completion_done(&wr->work))
			return 0;

		mdelay(IONIC_ADMIN_BUSY_RETRY_MS);

		spin_lock_irqsave(&aq->lock, irqflags);
		ionic_admin_poll_locked(aq);
		spin_unlock_irqrestore(&aq->lock, irqflags);
	}

	/*
	 * We timed out. Initiate RDMA LIF reset and indicate
	 * error to caller.
	 */
	ionic_admin_timedout(aq);
	return -ETIMEDOUT;
}

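/*
 * Wait for an admin work request to complete. Depending on flags this spins,
 * sleeps interruptibly with a one second timeout, or sleeps uninterruptibly.
 * Returns 0 on success, or a negative errno if the wait failed, the adminq
 * was killed (outside of teardown), or the device reported a command error.
 */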
int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
		     enum ionic_admin_flags flags)
{
	int rc, timo;

	if (flags & IONIC_ADMIN_F_BUSYWAIT) {
		/* Spin */
		rc = ionic_admin_busy_wait(wr);
	} else if (flags & IONIC_ADMIN_F_INTERRUPT) {
		/*
		 * Interruptible sleep, 1s timeout
		 * This is used for commands which are safe for the caller
		 * to clean up without killing and resetting the adminq.
		 */
		timo = wait_for_completion_interruptible_timeout(&wr->work,
								 HZ);
		if (timo > 0)
			rc = 0;
		else if (timo == 0)
			rc = -ETIMEDOUT;
		else
			rc = timo;
	} else {
		/*
		 * Uninterruptible sleep
		 * This is used for commands which are NOT safe for the
		 * caller to clean up. Cleanup must be handled by the
		 * adminq kill and reset process so that host memory is
		 * not corrupted by the device.
		 */
		wait_for_completion(&wr->work);
		rc = 0;
	}

	if (rc) {
		ibdev_warn(&dev->ibdev, "wait status %d\n", rc);
		ionic_admin_cancel(wr);
	} else if (wr->status == IONIC_ADMIN_KILLED) {
		ibdev_dbg(&dev->ibdev, "admin killed\n");

		/* No error if admin already killed during teardown */
		rc = (flags & IONIC_ADMIN_F_TEARDOWN) ? 0 : -ENODEV;
	} else if (ionic_v1_cqe_error(&wr->cqe)) {
		ibdev_warn(&dev->ibdev, "opcode %u error %u\n",
			   wr->wqe.op,
			   be32_to_cpu(wr->cqe.status_length));
		rc = -EINVAL;
	}
	return rc;
}

static int ionic_rdma_devcmd(struct ionic_ibdev *dev,
			     struct ionic_admin_ctx *admin)
{
	int rc;

	rc = ionic_adminq_post_wait(dev->lif_cfg.lif, admin);
	if (rc)
		return rc;

	return ionic_error_to_errno(admin->comp.comp.status);
}

int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev)
{
	struct ionic_admin_ctx admin = {
		.work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
		.cmd.rdma_reset = {
			.opcode = IONIC_CMD_RDMA_RESET_LIF,
			.lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
		},
	};

	return ionic_rdma_devcmd(dev, &admin);
}

static int ionic_rdma_queue_devcmd(struct ionic_ibdev *dev,
				   struct ionic_queue *q,
				   u32 qid, u32 cid, u16 opcode)
{
	struct ionic_admin_ctx admin = {
		.work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
		.cmd.rdma_queue = {
			.opcode = opcode,
			.lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
			.qid_ver = cpu_to_le32(qid),
			.cid = cpu_to_le32(cid),
			.dbid = cpu_to_le16(dev->lif_cfg.dbid),
			.depth_log2 = q->depth_log2,
			.stride_log2 = q->stride_log2,
			.dma_addr = cpu_to_le64(q->dma),
		},
	};

	return ionic_rdma_devcmd(dev, &admin);
}

static void ionic_rdma_admincq_comp(struct ib_cq *ibcq, void *cq_context)
{
	struct ionic_aq *aq = cq_context;
	unsigned long irqflags;

	spin_lock_irqsave(&aq->lock, irqflags);
	aq->armed = false;
	if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
		queue_work(ionic_evt_workq, &aq->work);
	spin_unlock_irqrestore(&aq->lock, irqflags);
}

static void ionic_rdma_admincq_event(struct ib_event *event, void *cq_context)
{
	struct ionic_aq *aq = cq_context;

	ibdev_err(&aq->dev->ibdev, "admincq event %d\n", event->event);
}

static struct ionic_vcq *ionic_create_rdma_admincq(struct ionic_ibdev *dev,
						   int comp_vector)
{
	struct ib_cq_init_attr attr = {
		.cqe = IONIC_AQ_DEPTH,
		.comp_vector = comp_vector,
	};
	struct ionic_tbl_buf buf = {};
	struct ionic_vcq *vcq;
	struct ionic_cq *cq;
	int rc;

	vcq = kzalloc(sizeof(*vcq), GFP_KERNEL);
	if (!vcq)
		return ERR_PTR(-ENOMEM);

	vcq->ibcq.device = &dev->ibdev;
	vcq->ibcq.comp_handler = ionic_rdma_admincq_comp;
	vcq->ibcq.event_handler = ionic_rdma_admincq_event;
	atomic_set(&vcq->ibcq.usecnt, 0);

	vcq->udma_mask = 1;
	cq = &vcq->cq[0];

	rc = ionic_create_cq_common(vcq, &buf, &attr, NULL, NULL,
				    NULL, NULL, 0);
	if (rc)
		goto err_init;

	rc = ionic_rdma_queue_devcmd(dev, &cq->q, cq->cqid, cq->eqid,
				     IONIC_CMD_RDMA_CREATE_CQ);
	if (rc)
		goto err_cmd;

	return vcq;

err_cmd:
	ionic_destroy_cq_common(dev, cq);
err_init:
	kfree(vcq);

	return ERR_PTR(rc);
}

static struct ionic_aq *__ionic_create_rdma_adminq(struct ionic_ibdev *dev,
						   u32 aqid, u32 cqid)
{
	struct ionic_aq *aq;
	int rc;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return ERR_PTR(-ENOMEM);

	atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
	aq->dev = dev;
	aq->aqid = aqid;
	aq->cqid = cqid;
	spin_lock_init(&aq->lock);

	rc = ionic_queue_init(&aq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
			      ADMIN_WQE_STRIDE);
	if (rc)
		goto err_q;

	ionic_queue_dbell_init(&aq->q, aq->aqid);

	aq->q_wr = kcalloc((u32)aq->q.mask + 1, sizeof(*aq->q_wr), GFP_KERNEL);
	if (!aq->q_wr) {
		rc = -ENOMEM;
		goto err_wr;
	}

	INIT_LIST_HEAD(&aq->wr_prod);
	INIT_LIST_HEAD(&aq->wr_post);

	INIT_WORK(&aq->work, ionic_admin_work);
	aq->armed = false;

	return aq;

err_wr:
	ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
err_q:
	kfree(aq);

	return ERR_PTR(rc);
}

static void __ionic_destroy_rdma_adminq(struct ionic_ibdev *dev,
					struct ionic_aq *aq)
{
	ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
	kfree(aq);
}

static struct ionic_aq *ionic_create_rdma_adminq(struct ionic_ibdev *dev,
						 u32 aqid, u32 cqid)
{
	struct ionic_aq *aq;
	int rc;

	aq = __ionic_create_rdma_adminq(dev, aqid, cqid);
	if (IS_ERR(aq))
		return aq;

	rc = ionic_rdma_queue_devcmd(dev, &aq->q, aq->aqid, aq->cqid,
				     IONIC_CMD_RDMA_CREATE_ADMINQ);
	if (rc)
		goto err_cmd;

	return aq;

err_cmd:
	__ionic_destroy_rdma_adminq(dev, aq);

	return ERR_PTR(rc);
}

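/*
 * Mark every adminq killed and flush its outstanding commands locally, then
 * report IB_EVENT_DEVICE_FATAL to consumers if this is the fatal error path.
 */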
static void ionic_kill_ibdev(struct ionic_ibdev *dev, bool fatal_path)
{
	unsigned long irqflags;
	bool do_flush = false;
	int i;

	/* Mark AQs for drain and flush the QPs while irq is disabled */
	local_irq_save(irqflags);

	/* Mark the admin queue, flushing at most once */
	for (i = 0; i < dev->lif_cfg.aq_count; i++) {
		struct ionic_aq *aq = dev->aq_vec[i];

		spin_lock(&aq->lock);
		if (atomic_read(&aq->admin_state) != IONIC_ADMIN_KILLED) {
			atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
			/* Flush incomplete admin commands */
			ionic_admin_poll_locked(aq);
			do_flush = true;
		}
		spin_unlock(&aq->lock);
	}

	local_irq_restore(irqflags);

	/* Post a fatal event if requested */
	if (fatal_path) {
		struct ib_event ev;

		ev.device = &dev->ibdev;
		ev.element.port_num = 1;
		ev.event = IB_EVENT_DEVICE_FATAL;

		ib_dispatch_event(&ev);
	}

	atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
}

void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path)
{
	enum ionic_admin_state old_state;
	unsigned long irqflags = 0;
	int i, rc;

	if (!dev->aq_vec)
		return;

	/*
	 * Admin queues are transitioned from active to paused to killed state.
	 * When in paused state, no new commands are issued to the device,
	 * nor are any completed locally. After resetting the lif, it will be
	 * safe to resume the rdma admin queues in the killed state. Commands
	 * will not be issued to the device, but will complete locally with
	 * status IONIC_ADMIN_KILLED. Handling completion will ensure that
	 * creating or modifying resources fails, but destroying resources
	 * succeeds. If there was a failure resetting the lif using this
	 * strategy, then the state of the device is unknown.
	 */
	old_state = atomic_cmpxchg(&dev->admin_state, IONIC_ADMIN_ACTIVE,
				   IONIC_ADMIN_PAUSED);
	if (old_state != IONIC_ADMIN_ACTIVE)
		return;

	/* Pause all the AQs */
	local_irq_save(irqflags);
	for (i = 0; i < dev->lif_cfg.aq_count; i++) {
		struct ionic_aq *aq = dev->aq_vec[i];

		spin_lock(&aq->lock);
		/* pause rdma admin queues to reset lif */
		if (atomic_read(&aq->admin_state) == IONIC_ADMIN_ACTIVE)
			atomic_set(&aq->admin_state, IONIC_ADMIN_PAUSED);
		spin_unlock(&aq->lock);
	}
	local_irq_restore(irqflags);

	rc = ionic_rdma_reset_devcmd(dev);
	if (unlikely(rc)) {
		ibdev_err(&dev->ibdev, "failed to reset rdma %d\n", rc);
		ionic_request_rdma_reset(dev->lif_cfg.lif);
	}

	ionic_kill_ibdev(dev, fatal_path);
}

static void ionic_reset_work(struct work_struct *ws)
{
	struct ionic_ibdev *dev =
		container_of(ws, struct ionic_ibdev, reset_work);

	ionic_kill_rdma_admin(dev, true);
}

static bool ionic_next_eqe(struct ionic_eq *eq, struct ionic_v1_eqe *eqe)
{
	struct ionic_v1_eqe *qeqe;
	bool color;

	qeqe = ionic_queue_at_prod(&eq->q);
	color = ionic_v1_eqe_color(qeqe);

	/* cons is color for eq */
	if (eq->q.cons != color)
		return false;

	/* Prevent out-of-order reads of the EQE */
	dma_rmb();

	ibdev_dbg(&eq->dev->ibdev, "poll eq prod %u\n", eq->q.prod);
	print_hex_dump_debug("eqe ", DUMP_PREFIX_OFFSET, 16, 1,
			     qeqe, BIT(eq->q.stride_log2), true);
	*eqe = *qeqe;

	return true;
}

static void ionic_cq_event(struct ionic_ibdev *dev, u32 cqid, u8 code)
{
	unsigned long irqflags;
	struct ib_event ibev;
	struct ionic_cq *cq;

	xa_lock_irqsave(&dev->cq_tbl, irqflags);
	cq = xa_load(&dev->cq_tbl, cqid);
	if (cq)
		kref_get(&cq->cq_kref);
	xa_unlock_irqrestore(&dev->cq_tbl, irqflags);

	if (!cq) {
		ibdev_dbg(&dev->ibdev,
			  "missing cqid %#x code %u\n", cqid, code);
		return;
	}

	switch (code) {
	case IONIC_V1_EQE_CQ_NOTIFY:
		if (cq->vcq->ibcq.comp_handler)
			cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
						   cq->vcq->ibcq.cq_context);
		break;

	case IONIC_V1_EQE_CQ_ERR:
		if (cq->vcq->ibcq.event_handler) {
			ibev.event = IB_EVENT_CQ_ERR;
			ibev.device = &dev->ibdev;
			ibev.element.cq = &cq->vcq->ibcq;

			cq->vcq->ibcq.event_handler(&ibev,
						    cq->vcq->ibcq.cq_context);
		}
		break;

	default:
		ibdev_dbg(&dev->ibdev,
			  "unrecognized cqid %#x code %u\n", cqid, code);
		break;
	}

	kref_put(&cq->cq_kref, ionic_cq_complete);
}

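/*
 * Drain up to budget entries from the event queue, dispatching CQ events.
 * Returns the number of EQEs processed.
 */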
static u16 ionic_poll_eq(struct ionic_eq *eq, u16 budget)
{
	struct ionic_ibdev *dev = eq->dev;
	struct ionic_v1_eqe eqe;
	u16 npolled = 0;
	u8 type, code;
	u32 evt, qid;

	while (npolled < budget) {
		if (!ionic_next_eqe(eq, &eqe))
			break;

		ionic_queue_produce(&eq->q);

		/* cons is color for eq */
		eq->q.cons = ionic_color_wrap(eq->q.prod, eq->q.cons);

		++npolled;

		evt = ionic_v1_eqe_evt(&eqe);
		type = ionic_v1_eqe_evt_type(evt);
		code = ionic_v1_eqe_evt_code(evt);
		qid = ionic_v1_eqe_evt_qid(evt);

		switch (type) {
		case IONIC_V1_EQE_TYPE_CQ:
			ionic_cq_event(dev, qid, code);
			break;

		default:
			ibdev_dbg(&dev->ibdev,
				  "unknown event %#x type %u\n", evt, type);
		}
	}

	return npolled;
}

static void ionic_poll_eq_work(struct work_struct *work)
{
	struct ionic_eq *eq = container_of(work, struct ionic_eq, work);
	u32 npolled;

	if (unlikely(!eq->enable) || WARN_ON(eq->armed))
		return;

	npolled = ionic_poll_eq(eq, IONIC_EQ_WORK_BUDGET);
	if (npolled == IONIC_EQ_WORK_BUDGET) {
		ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
				   npolled, 0);
		queue_work(ionic_evt_workq, &eq->work);
	} else {
		xchg(&eq->armed, true);
		ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
				   0, IONIC_INTR_CRED_UNMASK);
	}
}

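/*
 * EQ interrupt handler: poll up to the ISR budget, then either hand the
 * remaining work to the workqueue or re-arm the interrupt.
 */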
static irqreturn_t ionic_poll_eq_isr(int irq, void *eqptr)
{
	struct ionic_eq *eq = eqptr;
	bool was_armed;
	u32 npolled;

	was_armed = xchg(&eq->armed, false);

	if (unlikely(!eq->enable) || !was_armed)
		return IRQ_HANDLED;

	npolled = ionic_poll_eq(eq, IONIC_EQ_ISR_BUDGET);
	if (npolled == IONIC_EQ_ISR_BUDGET) {
		ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
				   npolled, 0);
		queue_work(ionic_evt_workq, &eq->work);
	} else {
		xchg(&eq->armed, true);
		ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
				   0, IONIC_INTR_CRED_UNMASK);
	}

	return IRQ_HANDLED;
}

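/*
 * Allocate one event queue: queue memory, an interrupt, and the device-side
 * EQ object. The interrupt is left unmasked on success.
 */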
static struct ionic_eq *ionic_create_eq(struct ionic_ibdev *dev, int eqid)
{
	struct ionic_intr_info intr_obj = { };
	struct ionic_eq *eq;
	int rc;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq)
		return ERR_PTR(-ENOMEM);

	eq->dev = dev;

	rc = ionic_queue_init(&eq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
			      sizeof(struct ionic_v1_eqe));
	if (rc)
		goto err_q;

	eq->eqid = eqid;

	eq->armed = true;
	eq->enable = false;
	INIT_WORK(&eq->work, ionic_poll_eq_work);

	rc = ionic_intr_alloc(dev->lif_cfg.lif, &intr_obj);
	if (rc < 0)
		goto err_intr;

	eq->irq = intr_obj.vector;
	eq->intr = intr_obj.index;

	ionic_queue_dbell_init(&eq->q, eq->eqid);

	/* cons is color for eq */
	eq->q.cons = true;

	snprintf(eq->name, sizeof(eq->name), "%s-%d-%d-eq",
		 "ionr", dev->lif_cfg.lif_index, eq->eqid);

	ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
	ionic_intr_mask_assert(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
	ionic_intr_coal_init(dev->lif_cfg.intr_ctrl, eq->intr, 0);
	ionic_intr_clean(dev->lif_cfg.intr_ctrl, eq->intr);

	eq->enable = true;

	rc = request_irq(eq->irq, ionic_poll_eq_isr, 0, eq->name, eq);
	if (rc)
		goto err_irq;

	rc = ionic_rdma_queue_devcmd(dev, &eq->q, eq->eqid, eq->intr,
				     IONIC_CMD_RDMA_CREATE_EQ);
	if (rc)
		goto err_cmd;

	ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_CLEAR);

	return eq;

err_cmd:
	eq->enable = false;
	free_irq(eq->irq, eq);
	flush_work(&eq->work);
err_irq:
	ionic_intr_free(dev->lif_cfg.lif, eq->intr);
err_intr:
	ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
err_q:
	kfree(eq);

	return ERR_PTR(rc);
}

static void ionic_destroy_eq(struct ionic_eq *eq)
{
	struct ionic_ibdev *dev = eq->dev;

	eq->enable = false;
	free_irq(eq->irq, eq);
	flush_work(&eq->work);

	ionic_intr_free(dev->lif_cfg.lif, eq->intr);
	ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
	kfree(eq);
}

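/*
 * Create the RDMA admin resources: the event queues, the admin queues, and a
 * dedicated completion queue for each adminq. Creating fewer EQs and AQs
 * than the device supports is tolerated, as long as the minimum counts are
 * met.
 */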
int ionic_create_rdma_admin(struct ionic_ibdev *dev)
{
	int eq_i = 0, aq_i = 0, rc = 0;
	struct ionic_vcq *vcq;
	struct ionic_aq *aq;
	struct ionic_eq *eq;

	dev->eq_vec = NULL;
	dev->aq_vec = NULL;

	INIT_WORK(&dev->reset_work, ionic_reset_work);
	INIT_DELAYED_WORK(&dev->admin_dwork, ionic_admin_dwork);
	atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);

	if (dev->lif_cfg.aq_count > IONIC_AQ_COUNT) {
		ibdev_dbg(&dev->ibdev, "limiting adminq count to %d\n",
			  IONIC_AQ_COUNT);
		dev->lif_cfg.aq_count = IONIC_AQ_COUNT;
	}

	if (dev->lif_cfg.eq_count > IONIC_EQ_COUNT) {
		dev_dbg(&dev->ibdev.dev, "limiting eventq count to %d\n",
			IONIC_EQ_COUNT);
		dev->lif_cfg.eq_count = IONIC_EQ_COUNT;
	}

	/* need at least IONIC_EQ_COUNT_MIN eqs and IONIC_AQ_COUNT_MIN aqs */
	if (dev->lif_cfg.eq_count < IONIC_EQ_COUNT_MIN ||
	    dev->lif_cfg.aq_count < IONIC_AQ_COUNT_MIN) {
		rc = -EINVAL;
		goto out;
	}

	dev->eq_vec = kmalloc_array(dev->lif_cfg.eq_count, sizeof(*dev->eq_vec),
				    GFP_KERNEL);
	if (!dev->eq_vec) {
		rc = -ENOMEM;
		goto out;
	}

	for (eq_i = 0; eq_i < dev->lif_cfg.eq_count; ++eq_i) {
		eq = ionic_create_eq(dev, eq_i + dev->lif_cfg.eq_base);
		if (IS_ERR(eq)) {
			rc = PTR_ERR(eq);

			if (eq_i < IONIC_EQ_COUNT_MIN) {
				ibdev_err(&dev->ibdev,
					  "fail create eq %d\n", rc);
				goto out;
			}

			/* ok, just fewer eq than device supports */
			ibdev_dbg(&dev->ibdev, "eq count %d want %d rc %d\n",
				  eq_i, dev->lif_cfg.eq_count, rc);

			rc = 0;
			break;
		}

		dev->eq_vec[eq_i] = eq;
	}

	dev->lif_cfg.eq_count = eq_i;

	dev->aq_vec = kmalloc_array(dev->lif_cfg.aq_count, sizeof(*dev->aq_vec),
				    GFP_KERNEL);
	if (!dev->aq_vec) {
		rc = -ENOMEM;
		goto out;
	}

	/* Create one CQ per AQ */
	for (aq_i = 0; aq_i < dev->lif_cfg.aq_count; ++aq_i) {
		vcq = ionic_create_rdma_admincq(dev, aq_i % eq_i);
		if (IS_ERR(vcq)) {
			rc = PTR_ERR(vcq);

			if (!aq_i) {
				ibdev_err(&dev->ibdev,
					  "failed to create acq %d\n", rc);
				goto out;
			}

			/* ok, just fewer adminq than device supports */
			ibdev_dbg(&dev->ibdev, "acq count %d want %d rc %d\n",
				  aq_i, dev->lif_cfg.aq_count, rc);
			break;
		}

		aq = ionic_create_rdma_adminq(dev, aq_i + dev->lif_cfg.aq_base,
					      vcq->cq[0].cqid);
		if (IS_ERR(aq)) {
			/* Clean up the dangling CQ */
			ionic_destroy_cq_common(dev, &vcq->cq[0]);
			kfree(vcq);

			rc = PTR_ERR(aq);

			if (!aq_i) {
				ibdev_err(&dev->ibdev,
					  "failed to create aq %d\n", rc);
				goto out;
			}

			/* ok, just fewer adminq than device supports */
			ibdev_dbg(&dev->ibdev, "aq count %d want %d rc %d\n",
				  aq_i, dev->lif_cfg.aq_count, rc);
			break;
		}

		vcq->ibcq.cq_context = aq;
		aq->vcq = vcq;

		atomic_set(&aq->admin_state, IONIC_ADMIN_ACTIVE);
		dev->aq_vec[aq_i] = aq;
	}

	atomic_set(&dev->admin_state, IONIC_ADMIN_ACTIVE);
out:
	dev->lif_cfg.eq_count = eq_i;
	dev->lif_cfg.aq_count = aq_i;

	return rc;
}

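/*
 * Tear down the adminqs, their completion queues, and the event queues
 * created by ionic_create_rdma_admin(). The adminqs are expected to have
 * been killed already, so no new work is queued.
 */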
void ionic_destroy_rdma_admin(struct ionic_ibdev *dev)
{
	struct ionic_vcq *vcq;
	struct ionic_aq *aq;
	struct ionic_eq *eq;

	/*
	 * Killing the admin before destroy makes sure all admin commands and
	 * completions are flushed. admin_state = IONIC_ADMIN_KILLED stops
	 * further work from being queued.
	 */
	cancel_delayed_work_sync(&dev->admin_dwork);
	cancel_work_sync(&dev->reset_work);

	if (dev->aq_vec) {
		while (dev->lif_cfg.aq_count > 0) {
			aq = dev->aq_vec[--dev->lif_cfg.aq_count];
			vcq = aq->vcq;

			cancel_work_sync(&aq->work);

			__ionic_destroy_rdma_adminq(dev, aq);
			if (vcq) {
				ionic_destroy_cq_common(dev, &vcq->cq[0]);
				kfree(vcq);
			}
		}

		kfree(dev->aq_vec);
	}

	if (dev->eq_vec) {
		while (dev->lif_cfg.eq_count > 0) {
			eq = dev->eq_vec[--dev->lif_cfg.eq_count];
			ionic_destroy_eq(eq);
		}

		kfree(dev->eq_vec);
	}
}
1124*f3bdbd42SAbhijit Gangurde }
1125